[
  {
    "path": "README.md",
    "content": "# RTSP-Camera-for-Android\nAndroid based RTSP Server which is able to serve live camera view to multiple RTSP clients, such as VLC.\n\nThis project is not maintained anymore (in fact since 2end of 2012).\nIt exists to share the code how to implement this back in the days.\n\nI've not tested out the following gitHub project on my own, but if you are looking for a more actual Android RTSP based solution, pls check out:\n* https://github.com/hypeapps/Endoscope\n\nthanks for all the fish\n(=PA=)\n"
  },
  {
    "path": "RtspCamera/.classpath",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<classpath>\r\n\t<classpathentry kind=\"src\" path=\"src\"/>\r\n\t<classpathentry kind=\"src\" path=\"gen\"/>\r\n\t<classpathentry kind=\"con\" path=\"com.android.ide.eclipse.adt.ANDROID_FRAMEWORK\"/>\r\n\t<classpathentry exported=\"true\" kind=\"con\" path=\"com.android.ide.eclipse.adt.LIBRARIES\"/>\r\n\t<classpathentry kind=\"output\" path=\"bin/classes\"/>\r\n</classpath>\r\n"
  },
  {
    "path": "RtspCamera/.gitignore",
    "content": "/bin\n/gen\n"
  },
  {
    "path": "RtspCamera/.project",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<projectDescription>\r\n\t<name>RtspCamera</name>\r\n\t<comment></comment>\r\n\t<projects>\r\n\t</projects>\r\n\t<buildSpec>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>org.eclipse.jdt.core.javabuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.ApkBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t</buildSpec>\r\n\t<natures>\r\n\t\t<nature>com.android.ide.eclipse.adt.AndroidNature</nature>\r\n\t\t<nature>org.eclipse.jdt.core.javanature</nature>\r\n\t</natures>\r\n</projectDescription>\r\n"
  },
  {
    "path": "RtspCamera/.settings/org.jboss.ide.eclipse.as.core.prefs",
    "content": "eclipse.preferences.version=1\r\norg.jboss.ide.eclipse.as.core.singledeployable.deployableList=\r\n"
  },
  {
    "path": "RtspCamera/AndroidManifest.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    package=\"de.kp.rtspcamera\"\n    android:versionCode=\"1\"\n    android:versionName=\"1.0\" >\n\n    <uses-sdk\n        android:minSdkVersion=\"5\"\n        android:targetSdkVersion=\"6\" />\n\n    <uses-feature android:name=\"android.hardware.camera\" />\n\n    <uses-permission android:name=\"android.permission.CAMERA\" />\n    <uses-permission android:name=\"android.permission.INTERNET\" />\n    <uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\" />\n    <uses-permission android:name=\"android.permission.WAKE_LOCK\" />\n\n    <application\n        android:icon=\"@drawable/icon\" \n        android:debuggable=\"true\"\n        android:label=\"@string/app_name\" >\n        <activity\n            android:name=\".RtspNativeCodecsCamera\"\n            android:label=\"@string/app_name\"\n            android:screenOrientation=\"landscape\" >\n            <intent-filter>\n                <action android:name=\"android.intent.action.MAIN\" />\n\n                <category android:name=\"android.intent.category.LAUNCHER\" />\n            </intent-filter>\n        </activity>\n    </application>\n\n</manifest>"
  },
  {
    "path": "RtspCamera/gpl.txt",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"
  },
  {
    "path": "RtspCamera/jni/Android.mk",
    "content": "include $(call all-subdir-makefiles)\n"
  },
  {
    "path": "RtspCamera/jni/Application.mk",
    "content": "APP_PROJECT_PATH := /arwa/git/RTSP-Camera-for-Android/RtspCamera\nAPP_MODULES      := libH264Decoder libH264Encoder libH263Encoder libH263Decoder \n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/Android.mk",
    "content": "AVC_ROOT:= $(call my-dir)\ninclude $(call all-subdir-makefiles)\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/include/avcapi_common.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains common type definitions and enumerations used by AVC encoder\nand decoder libraries which are exposed to the users.\n@publishedAll\n*/\n\n#ifndef AVCAPI_COMMON_H_INCLUDED\n#define AVCAPI_COMMON_H_INCLUDED\n\n// xxx pa deact PV_MEMORY_POOL for test\n#define PV_MEMORY_POOL\n\n/**\nThis is common return status.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_NO_BUFFER = -2,\n    AVC_MEMORY_FAIL = -1,\n    AVC_FAIL = 0,\n    AVC_SUCCESS = 1,\n    AVC_PICTURE_OUTPUT_READY = 2\n} AVCStatus;\n\n/**\nThis enumeration is for profiles. The value follows the profile_idc  in sequence\nparameter set rbsp. See Annex A.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_BASELINE = 66,\n    AVC_MAIN = 77,\n    AVC_EXTENDED = 88,\n    AVC_HIGH = 100,\n    AVC_HIGH10 = 110,\n    AVC_HIGH422 = 122,\n    AVC_HIGH444 = 144\n} AVCProfile;\n\n/**\nThis enumeration is for levels. The value follows the level_idc in sequence\nparameter set rbsp. 
See Annex A.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_LEVEL_AUTO = 0,\n    AVC_LEVEL1_B = 9,\n    AVC_LEVEL1 = 10,\n    AVC_LEVEL1_1 = 11,\n    AVC_LEVEL1_2 = 12,\n    AVC_LEVEL1_3 = 13,\n    AVC_LEVEL2 = 20,\n    AVC_LEVEL2_1 = 21,\n    AVC_LEVEL2_2 = 22,\n    AVC_LEVEL3 = 30,\n    AVC_LEVEL3_1 = 31,\n    AVC_LEVEL3_2 = 32,\n    AVC_LEVEL4 = 40,\n    AVC_LEVEL4_1 = 41,\n    AVC_LEVEL4_2 = 42,\n    AVC_LEVEL5 = 50,\n    AVC_LEVEL5_1 = 51\n} AVCLevel;\n\n/**\nThis enumeration follows Table 7-1 for NAL unit type codes.\nThis may go to avccommon_api.h later (external common).\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_NALTYPE_SLICE = 1,  /* non-IDR non-data partition */\n    AVC_NALTYPE_DPA = 2,    /* data partition A */\n    AVC_NALTYPE_DPB = 3,    /* data partition B */\n    AVC_NALTYPE_DPC = 4,    /* data partition C */\n    AVC_NALTYPE_IDR = 5,    /* IDR NAL */\n    AVC_NALTYPE_SEI = 6,    /* supplemental enhancement info */\n    AVC_NALTYPE_SPS = 7,    /* sequence parameter set */\n    AVC_NALTYPE_PPS = 8,    /* picture parameter set */\n    AVC_NALTYPE_AUD = 9,    /* access unit delimiter */\n    AVC_NALTYPE_EOSEQ = 10, /* end of sequence */\n    AVC_NALTYPE_EOSTREAM = 11, /* end of stream */\n    AVC_NALTYPE_FILL = 12   /* filler data */\n} AVCNalUnitType;\n\n/**\nThis enumeration specifies debug logging type.\nThis may go to avccommon_api.h later (external common).\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_LOGTYPE_ERROR = 0,\n    AVC_LOGTYPE_WARNING = 1,\n    AVC_LOGTYPE_INFO = 2\n} AVCLogType;\n\n/**\nThis enumerates the status of certain flags.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_OFF = 0,\n    AVC_ON = 1\n} AVCFlag;\n\n/**\nThis structure contains input information.\nNote, this structure is identical to AVCDecOutput for now.\n*/\ntypedef struct tagAVCFrameIO\n{\n    /** A unique identification number for a particular instance of this structure.\n    To remain unchanged by the application between the time when it is given to the\n    library 
and the time when the library returns it back. */\n    uint32 id;\n\n    /** Array of pointers to Y,Cb,Cr content in 4:2:0 format. For AVC decoding,\n    this memory is allocated by the AVC decoder library. For AVC encoding, only the\n    memory for original unencoded frame is allocated by the application. Internal\n    memory is also allocated by the AVC encoder library. */\n    uint8 *YCbCr[3];\n\n    /** In/Out: Coded width of the luma component, it has to be multiple of 16. */\n    int pitch;\n\n    /** In/Out: Coded height of the luma component, must be multiple of 16. */\n    int height;\n\n    /** In/Out: Display width, less than pitch */\n    int clip_width;\n\n    /** In/Out: Display height, less than height */\n    int clip_height;\n\n    /** Input: Origin of the display area [0]=>row, [1]=>column  */\n    int clip_origin[2];\n\n    /** Output: Frame number in de/encoding order (not necessary)*/\n    uint32 coding_order;\n\n    /** Output: Frame number in displaying order (this may or may not be associated with the POC at all!!!). */\n    uint32 disp_order;\n\n    /** In/Out: Flag for use for reference or not. */\n    uint  is_reference;\n\n    /** In/Out: Coding timestamp in msec (not display timestamp) */\n    uint32 coding_timestamp;\n\n    /* there could be something else here such as format, DON (decoding order number)\n     if available thru SEI, etc. */\n} AVCFrameIO;\n\n\n/** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */\n/** In AVCDecControls structure, userData is a pointer to an object with the following\n    member functions.\n*/\n\n\n/** @brief Decoded picture buffers (DPB) must be allocated or re-allocated before an\n    IDR frame is decoded. If PV_MEMORY_POOL is not defined, AVC lib will allocate DPB\n    internally which cannot be shared with the application. 
In that case, this function\n    will not be called.\n    @param userData  The same value of userData in AVCHandle object.\n    @param frame_size_in_mbs  The size of each frame in number of macroblocks.\n    @param num_frames The number of frames in DPB.\n    @return 1 for success, 0 for fail (cannot allocate DPB)\n*/\n\ntypedef int (*FunctionType_DPBAlloc)(void *userData, uint frame_size_in_mbs, uint num_buffers);\n\n/** @brief AVC library calls this function to reserve a memory of one frame from the DPB.\n    Once reserved, this frame shall not be deleted or over-written by the app.\n    @param userData  The same value of userData in AVCHandle object.\n    @param indx      Index of a frame in DPB (AVC library keeps track of the index).\n    @param yuv      The address of the yuv pointer returned to the AVC lib.\n    @return         1 for success, 0 for fail (no frames available to bind).\n    */\ntypedef int (*FunctionType_FrameBind)(void *userData, int indx, uint8 **yuv);\n\n/** @brief AVC library calls this function once a bound frame is not needed for decoding\n    operation (falls out of the sliding window, or marked unused for reference).\n    @param userData  The same value of userData in AVCHandle object.\n    @param indx      Index of frame to be unbound (AVC library keeps track of the index).\n    @return  none.\n*/\ntypedef void (*FuctionType_FrameUnbind)(void *userData, int);\n\n/** Pointer to malloc function for general memory allocation, so that application can keep track of\n    memory usage.\n\\param \"size\" \"Size of requested memory in bytes.\"\n\\param \"attribute\" \"Some value specifying types, priority, etc. 
of the memory.\"\n\\return \"The address of the allocated memory casted to int\"\n*/\ntypedef int (*FunctionType_Malloc)(void *userData, int32 size, int attribute);\n\n/** Function pointer to free\n\\param \"mem\" \"Pointer to the memory to be freed casted to int\"\n\\return \"void\"\n*/\ntypedef void (*FunctionType_Free)(void *userData, int mem);\n\n/** Debug logging information is returned to the application thru this function.\n\\param \"type\"   \"Type of logging message, see definition of AVCLogType.\"\n\\param \"string1\"    \"Logging message.\"\n\\param \"string2\"    \"To be defined.\"\n*/\ntypedef void (*FunctionType_DebugLog)(uint32 *userData, AVCLogType type, char *string1, int val1, int val2);\n\n/**\nThis structure has to be allocated and maintained by the user of the library.\nThis structure is used as a handle to the library object.\n*/\ntypedef struct tagAVCHandle\n{\n    /** A pointer to the internal data structure. Users have to make sure that this value\n        is NULL at the beginning.\n    */\n    void        *AVCObject;\n\n    /** A pointer to user object which has the following member functions used for\n    callback purpose.  !!! */\n    void        *userData;\n\n    /** Pointers to functions implemented by the users of AVC library */\n    FunctionType_DPBAlloc CBAVC_DPBAlloc;\n\n    FunctionType_FrameBind CBAVC_FrameBind;\n\n    FuctionType_FrameUnbind CBAVC_FrameUnbind;\n\n    FunctionType_Malloc CBAVC_Malloc;\n\n    FunctionType_Free  CBAVC_Free;\n\n    FunctionType_DebugLog CBAVC_DebugLog;\n\n    /** Flag to enable debugging */\n    uint32  debugEnable;\n\n} AVCHandle;\n\n\n\n#ifdef PVDEBUGMSG_LOG\n#define DEBUG_LOG(a,b,c,d,e)    CBAVC_DebugLog(a,b,c,d,e)\n#else\n#define DEBUG_LOG(a,b,c,d,e)\n#endif\n\n#endif /* _AVCAPI_COMMON_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/include/avcint_common.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains common code shared between AVC decoder and AVC encoder for\ninternal use only.\n@publishedAll\n*/\n\n#ifndef AVCINT_COMMON_H_INCLUDED\n#define AVCINT_COMMON_H_INCLUDED\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#include \"oscl_types.h\"\n#endif\n#ifndef AVCAPI_COMMON_H_INCLUDED\n#include \"avcapi_common.h\"\n#endif\n\n\n#ifndef TRUE\n#define TRUE  1\n#define FALSE 0\n#endif\n\n\n\n/**\nMathematic functions defined in subclause 5.7.\nCan be replaced with assembly instructions for speedup.\n@publishedAll\n*/\n#define AVC_ABS(x)   (((x)<0)? -(x) : (x))\n#define AVC_SIGN(x)  (((x)<0)? -1 : 1)\n#define AVC_SIGN0(x) (((x)<0)? -1 : (((x)>0) ? 1 : 0))\n#define AVC_MAX(x,y) ((x)>(y)? (x):(y))\n#define AVC_MIN(x,y) ((x)<(y)? (x):(y))\n#define AVC_MEDIAN(A,B,C) ((A) > (B) ? ((A) < (C) ? (A) : (B) > (C) ? (B) : (C)): (B) < (C) ? (B) : (C) > (A) ? (C) : (A))\n#define AVC_CLIP3(a,b,x) (AVC_MAX(a,AVC_MIN(x,b)))  /* clip x between a and b */\n#define AVC_CLIP(x)  AVC_CLIP3(0,255,x)\n#define AVC_FLOOR(x) ((int)(x))\n#define AVC_RASTER_SCAN(x,y,n)  ((x)+(y)*(n))\n#define AVC_ROUND(x) (AVC_SIGN(x)*AVC_FLOOR(AVC_ABS(x)+0.5))\n#define AVC_INVERSE_RASTER_SCAN(a,b,c,d,e) (((e)==0)? 
(((a)%((d)/(b)))*(b)): (((a)/((d)/(b)))*(c)))\n/* a:block address, b:block width, c:block height, d:total_width, e:x or y coordinate */\n\n#define DEFAULT_ATTR  0  /* default memory attribute  */\n#define FAST_MEM_ATTR 1  /* fast memory attribute */\n\n\n/* This section is for definition of constants. */\n#define MB_SIZE 16\n#define BLOCK_SIZE 4\n#define EMULATION_PREVENTION_THREE_BYTE 0x3\n#define NUM_PIXELS_IN_MB  (24*16)\n#define NUM_BLKS_IN_MB 24\n\n#define AVCNumI4PredMode  9\n#define AVCNumI16PredMode  4\n#define AVCNumIChromaMode  4\n\n/* constants used in the structures below */\n#define MAXIMUMVALUEOFcpb_cnt   32  /* used in HRDParams */\n#define MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE 255   /* used in SeqParamSet */\n#define MAX_NUM_SLICE_GROUP  8      /* used in PicParamSet */\n#define MAX_REF_PIC_LIST_REORDERING 32  /* 32 is maximum according to Annex A, SliceHeader */\n#define MAX_DEC_REF_PIC_MARKING 64   /* 64 is the maximum possible given the max num ref pictures to 31. */\n#define MAX_FS (16+1)  /* pre-defined size of frame store array */\n#define MAX_LEVEL_IDX  15  /* only 15 levels defined for now */\n#define MAX_REF_PIC_LIST 33 /* max size of the RefPicList0 and RefPicList1 */\n\n\n/**\nArchitectural related macros.\n@publishedAll\n*/\n#ifdef USE_PRED_BLOCK\n#define MB_BASED_DEBLOCK\n#endif\n\n/**\nPicture type, PV created.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_FRAME = 3\n} AVCPictureType;\n\n/**\nThis slice type follows Table 7-3. The bottom 5 items may not needed.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_P_SLICE = 0,\n    AVC_B_SLICE = 1,\n    AVC_I_SLICE = 2,\n    AVC_SP_SLICE = 3,\n    AVC_SI_SLICE = 4,\n    AVC_P_ALL_SLICE = 5,\n    AVC_B_ALL_SLICE = 6,\n    AVC_I_ALL_SLICE = 7,\n    AVC_SP_ALL_SLICE = 8,\n    AVC_SI_ALL_SLICE = 9\n} AVCSliceType;\n\n/**\nTypes of the macroblock and partition. 
PV Created.\n@publishedAll\n*/\ntypedef enum\n{\n    /* intra */\n    AVC_I4,\n    AVC_I16,\n    AVC_I_PCM,\n    AVC_SI4,\n\n    /* inter for both P and B*/\n    AVC_BDirect16,\n    AVC_P16,\n    AVC_P16x8,\n    AVC_P8x16,\n    AVC_P8,\n    AVC_P8ref0,\n    AVC_SKIP\n} AVCMBMode;\n\n/**\nEnumeration for sub-macroblock mode, interpreted from sub_mb_type.\n@publishedAll\n*/\ntypedef enum\n{\n    /* for sub-partition mode */\n    AVC_BDirect8,\n    AVC_8x8,\n    AVC_8x4,\n    AVC_4x8,\n    AVC_4x4\n} AVCSubMBMode;\n\n/**\nMode of prediction of partition or sub-partition. PV Created.\nDo not change the order!!! Used in table look-up mode prediction in\nvlc.c.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_Pred_L0 = 0,\n    AVC_Pred_L1,\n    AVC_BiPred,\n    AVC_Direct\n} AVCPredMode;\n\n\n/**\nMode of intra 4x4 prediction. Table 8-2\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_I4_Vertical = 0,\n    AVC_I4_Horizontal,\n    AVC_I4_DC,\n    AVC_I4_Diagonal_Down_Left,\n    AVC_I4_Diagonal_Down_Right,\n    AVC_I4_Vertical_Right,\n    AVC_I4_Horizontal_Down,\n    AVC_I4_Vertical_Left,\n    AVC_I4_Horizontal_Up\n} AVCIntra4x4PredMode;\n\n/**\nMode of intra 16x16 prediction. Table 8-3\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_I16_Vertical = 0,\n    AVC_I16_Horizontal,\n    AVC_I16_DC,\n    AVC_I16_Plane\n} AVCIntra16x16PredMode;\n\n\n/**\nMode of intra chroma prediction. 
Table 8-4\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_IC_DC = 0,\n    AVC_IC_Horizontal,\n    AVC_IC_Vertical,\n    AVC_IC_Plane\n} AVCIntraChromaPredMode;\n\n/**\nType of residual going to residual_block_cavlc function, PV created.\n@publishedAll\n*/\ntypedef enum\n{\n    AVC_Luma,\n    AVC_Intra16DC,\n    AVC_Intra16AC,\n    AVC_ChromaDC,\n    AVC_ChromaAC\n} AVCResidualType;\n\n\n/**\nThis structure contains VUI parameters as specified in Annex E.\nSome variables may be removed from the structure if they are found to be useless to store.\n@publishedAll\n*/\ntypedef struct tagHRDParams\n{\n    uint  cpb_cnt_minus1;                                   /* ue(v), range 0..31 */\n    uint  bit_rate_scale;                          /* u(4) */\n    uint  cpb_size_scale;                          /* u(4) */\n    uint32  bit_rate_value_minus1[MAXIMUMVALUEOFcpb_cnt];/* ue(v), range 0..2^32-2 */\n    uint32  cpb_size_value_minus1[MAXIMUMVALUEOFcpb_cnt]; /* ue(v), range 0..2^32-2 */\n    uint  cbr_flag[MAXIMUMVALUEOFcpb_cnt];         /* u(1) */\n    uint  initial_cpb_removal_delay_length_minus1;   /* u(5), default 23 */\n    uint  cpb_removal_delay_length_minus1;           /* u(5), default 23 */\n    uint  dpb_output_delay_length_minus1;            /* u(5), default 23 */\n    uint  time_offset_length;                        /* u(5), default 24 */\n} AVCHRDParams;\n\n/**\nThis structure contains VUI parameters as specified in Annex E.\nSome variables may be removed from the structure if they are found to be useless to store.\n@publishedAll\n*/\ntypedef struct tagVUIParam\n{\n    uint      aspect_ratio_info_present_flag;     /* u(1) */\n    uint  aspect_ratio_idc;                     /* u(8), table E-1 */\n    uint  sar_width;                          /* u(16) */\n    uint  sar_height;                         /* u(16) */\n    uint      overscan_info_present_flag;         /* u(1) */\n    uint      overscan_appropriate_flag;        /* u(1) */\n    uint      
video_signal_type_present_flag;     /* u(1) */\n    uint  video_format;                         /* u(3), Table E-2, default 5, unspecified */\n    uint      video_full_range_flag;            /* u(1) */\n    uint      colour_description_present_flag;  /* u(1) */\n    uint  colour_primaries;                   /* u(8), Table E-3, default 2, unspecified */\n    uint  transfer_characteristics;           /* u(8), Table E-4, default 2, unspecified */\n    uint  matrix_coefficients;                /* u(8), Table E-5, default 2, unspecified */\n    uint      chroma_location_info_present_flag;  /* u(1) */\n    uint  chroma_sample_loc_type_top_field;                /* ue(v), Fig. E-1range 0..5, default 0 */\n    uint  chroma_sample_loc_type_bottom_field;                /* ue(v) */\n    uint      timing_info_present_flag;           /* u(1) */\n    uint  num_units_in_tick;                    /* u(32), must be > 0 */\n    uint  time_scale;                           /* u(32), must be > 0 */\n    uint      fixed_frame_rate_flag;            /* u(1), Eq. 
C-13 */\n    uint      nal_hrd_parameters_present_flag;    /* u(1) */\n    AVCHRDParams nal_hrd_parameters;               /* hrd_paramters */\n    uint      vcl_hrd_parameters_present_flag;    /* u(1) */\n    AVCHRDParams vcl_hrd_parameters;               /* hrd_paramters */\n    /* if ((nal_hrd_parameters_present_flag || (vcl_hrd_parameters_present_flag)) */\n    uint      low_delay_hrd_flag;               /* u(1) */\n    uint    pic_struct_present_flag;\n    uint      bitstream_restriction_flag;         /* u(1) */\n    uint      motion_vectors_over_pic_boundaries_flag;    /* u(1) */\n    uint  max_bytes_per_pic_denom;              /* ue(v), default 2 */\n    uint  max_bits_per_mb_denom;                /* ue(v), range 0..16, default 1 */\n    uint  log2_max_mv_length_vertical;          /* ue(v), range 0..16, default 16 */\n    uint  log2_max_mv_length_horizontal;        /* ue(v), range 0..16, default 16 */\n    uint  max_dec_frame_reordering;             /* ue(v) */\n    uint  max_dec_frame_buffering;              /* ue(v) */\n} AVCVUIParams;\n\n\n/**\nThis structure contains information in a sequence parameter set NAL.\nSome variables may be removed from the structure if they are found to be useless to store.\n@publishedAll\n*/\ntypedef struct tagSeqParamSet\n{\n    uint   Valid;            /* indicates the parameter set is valid */\n\n    uint  profile_idc;              /* u(8) */\n    uint   constrained_set0_flag;  /* u(1) */\n    uint   constrained_set1_flag;  /* u(1) */\n    uint   constrained_set2_flag;  /* u(1) */\n    uint   constrained_set3_flag;  /* u(1) */\n    uint  level_idc;               /* u(8) */\n    uint  seq_parameter_set_id;    /* ue(v), range 0..31 */\n    uint  log2_max_frame_num_minus4; /* ue(v), range 0..12 */\n    uint pic_order_cnt_type;        /* ue(v), range 0..2 */\n    /* if( pic_order_cnt_type == 0 )  */\n    uint log2_max_pic_order_cnt_lsb_minus4; /* ue(v), range 0..12 */\n    /* else if( pic_order_cnt_type == 1 ) */\n    uint 
delta_pic_order_always_zero_flag;  /* u(1) */\n    int32  offset_for_non_ref_pic;       /* se(v) */\n    int32  offset_for_top_to_bottom_field;  /* se(v) */\n    uint  num_ref_frames_in_pic_order_cnt_cycle;   /* ue(v) , range 0..255 */\n    /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */\n    int32   offset_for_ref_frame[MAX_NUM_REF_FRAMES_IN_PIC_ORDER_CNT_CYCLE];        /* se(v) */\n    uint  num_ref_frames;                           /* ue(v), range 0..16 */\n    uint   gaps_in_frame_num_value_allowed_flag;    /* u(1) */\n    uint  pic_width_in_mbs_minus1;                  /* ue(v) */\n    uint  pic_height_in_map_units_minus1;           /* ue(v) */\n    uint   frame_mbs_only_flag;                     /* u(1) */\n    /* if( !frame_mbs_only_flag ) */\n    uint   mb_adaptive_frame_field_flag;          /* u(1) */\n    uint   direct_8x8_inference_flag;    /* u(1), must be 1 when frame_mbs_only_flag is 0 */\n    uint   frame_cropping_flag;                     /* u(1) */\n    /* if( frmae_cropping_flag) */\n    uint  frame_crop_left_offset;                /* ue(v) */\n    uint  frame_crop_right_offset;               /* ue(v) */\n    uint  frame_crop_top_offset;                 /* ue(v) */\n    uint  frame_crop_bottom_offset;              /* ue(v) */\n    uint   vui_parameters_present_flag;                      /* u(1) */\n//  uint nal_hrd_parameters_present_flag;\n//  uint vcl_hrd_parameters_present_flag;\n//  AVCHRDParams *nal_hrd_parameters;\n//  AVCHRDParams *vcl_hrd_parameters;\n    AVCVUIParams vui_parameters;                  /* AVCVUIParam */\n} AVCSeqParamSet;\n\n/**\nThis structure contains information in a picture parameter set NAL.\nSome variables may be removed from the structure if they are found to be useless to store.\n@publishedAll\n*/\ntypedef struct tagPicParamSet\n{\n    uint  pic_parameter_set_id;              /* ue(v), range 0..255 */\n    uint  seq_parameter_set_id;              /* ue(v), range 0..31 */\n    uint  
entropy_coding_mode_flag;         /* u(1) */\n    uint  pic_order_present_flag;        /* u(1) */\n    uint  num_slice_groups_minus1;           /* ue(v), range in Annex A */\n    /* if( num_slice_groups_minus1 > 0) */\n    uint  slice_group_map_type;           /* ue(v), range 0..6 */\n    /* if( slice_group_map_type = = 0 ) */\n    /* for(0:1:num_slice_groups_minus1) */\n    uint  run_length_minus1[MAX_NUM_SLICE_GROUP]; /* ue(v) */\n    /* else if( slice_group_map_type = = 2 ) */\n    /* for(0:1:num_slice_groups_minus1-1) */\n    uint  top_left[MAX_NUM_SLICE_GROUP-1];      /* ue(v) */\n    uint  bottom_right[MAX_NUM_SLICE_GROUP-1];  /* ue(v) */\n    /* else if( slice_group_map_type = = 3 || 4 || 5 */\n    uint  slice_group_change_direction_flag;        /* u(1) */\n    uint  slice_group_change_rate_minus1;            /* ue(v) */\n    /* else if( slice_group_map_type = = 6 ) */\n    uint  pic_size_in_map_units_minus1;          /* ue(v) */\n    /* for(0:1:pic_size_in_map_units_minus1) */\n    uint  *slice_group_id;                           /* complete MBAmap u(v) */\n    uint  num_ref_idx_l0_active_minus1;                  /* ue(v), range 0..31 */\n    uint  num_ref_idx_l1_active_minus1;                  /* ue(v), range 0..31 */\n    uint  weighted_pred_flag;                           /* u(1) */\n    uint  weighted_bipred_idc;                          /* u(2), range 0..2 */\n    int   pic_init_qp_minus26;                       /* se(v), range -26..25 */\n    int   pic_init_qs_minus26;                       /* se(v), range -26..25 */\n    int   chroma_qp_index_offset;                    /* se(v), range -12..12 */\n    uint  deblocking_filter_control_present_flag;       /* u(1) */\n    uint  constrained_intra_pred_flag;                  /* u(1) */\n    uint  redundant_pic_cnt_present_flag;               /* u(1) */\n} AVCPicParamSet;\n\n\n/**\nThis structure contains slice header information.\nSome variables may be removed from the structure if they are found to be 
useless to store.\n@publishedAll\n*/\ntypedef struct tagSliceHeader\n{\n    uint    first_mb_in_slice;      /* ue(v) */\n    AVCSliceType slice_type;                /* ue(v), Table 7-3, range 0..9 */\n    uint    pic_parameter_set_id;   /* ue(v), range 0..255 */\n    uint    frame_num;              /* u(v), see log2max_frame_num_minus4 */\n    /* if( !frame_mbs_only_flag) */\n    uint    field_pic_flag;         /* u(1) */\n    /* if(field_pic_flag) */\n    uint bottom_field_flag; /* u(1) */\n    /* if(nal_unit_type == 5) */\n    uint    idr_pic_id;         /* ue(v), range 0..65535 */\n    /* if(pic_order_cnt_type==0) */\n    uint    pic_order_cnt_lsb;  /* u(v), range 0..MaxPicOrderCntLsb-1 */\n    /* if(pic_order_present_flag && !field_pic_flag) */\n    int32 delta_pic_order_cnt_bottom;   /* se(v) */\n    /* if(pic_order_cnt_type==1 && !delta_pic_order_always_zero_flag) */\n    /* if(pic_order_present_flag && !field_pic_flag) */\n    int32 delta_pic_order_cnt[2];\n    /* if(redundant_pic_cnt_present_flag) */\n    uint redundant_pic_cnt; /* ue(v), range 0..127 */\n    /* if(slice_type == B) */\n    uint direct_spatial_mv_pred_flag; /* u(1) */\n    /* if(slice_type == P || slice_type==SP || slice_type==B) */\n    uint num_ref_idx_active_override_flag;  /* u(1) */\n    /* if(num_ref_idx_active_override_flag) */\n    uint num_ref_idx_l0_active_minus1;  /* ue(v) */\n    /* if(slie_type == B) */\n    uint num_ref_idx_l1_active_minus1;  /* ue(v) */\n\n    /* ref_pic_list_reordering() */\n    uint ref_pic_list_reordering_flag_l0;   /* u(1) */\n    uint reordering_of_pic_nums_idc_l0[MAX_REF_PIC_LIST_REORDERING];   /* ue(v), range 0..3 */\n    uint abs_diff_pic_num_minus1_l0[MAX_REF_PIC_LIST_REORDERING];   /* ue(v) */\n    uint long_term_pic_num_l0[MAX_REF_PIC_LIST_REORDERING];     /* ue(v) */\n    uint ref_pic_list_reordering_flag_l1;   /* u(1) */\n    uint reordering_of_pic_nums_idc_l1[MAX_REF_PIC_LIST_REORDERING];   /* ue(v), range 0..3 */\n    uint 
abs_diff_pic_num_minus1_l1[MAX_REF_PIC_LIST_REORDERING];   /* ue(v) */\n    uint long_term_pic_num_l1[MAX_REF_PIC_LIST_REORDERING];     /* ue(v) */\n\n    /* end ref_pic_list_reordering() */\n    /* if(nal_ref_idc!=0) */\n    /* dec_ref_pic_marking() */\n    uint    no_output_of_prior_pics_flag;   /* u(1) */\n    uint long_term_reference_flag;      /* u(1) */\n    uint    adaptive_ref_pic_marking_mode_flag; /* u(1) */\n    uint    memory_management_control_operation[MAX_DEC_REF_PIC_MARKING];   /* ue(v), range 0..6 */\n    uint difference_of_pic_nums_minus1[MAX_DEC_REF_PIC_MARKING];    /* ue(v) */\n    uint    long_term_pic_num[MAX_DEC_REF_PIC_MARKING];             /* ue(v) */\n    uint    long_term_frame_idx[MAX_DEC_REF_PIC_MARKING];           /* ue(v) */\n    uint    max_long_term_frame_idx_plus1[MAX_DEC_REF_PIC_MARKING]; /* ue(v) */\n    /* end dec_ref_pic_marking() */\n    /* if(entropy_coding_mode_flag && slice_type!=I && slice_type!=SI) */\n    uint cabac_init_idc;        /* ue(v), range 0..2 */\n    int slice_qp_delta;     /* se(v), range 0..51 */\n    /* if(slice_type==SP || slice_type==SI) */\n    /* if(slice_type==SP) */\n    uint    sp_for_switch_flag; /* u(1) */\n    int slice_qs_delta;     /* se(v) */\n\n    /* if(deblocking_filter_control_present_flag)*/\n    uint disable_deblocking_filter_idc; /* ue(v), range 0..2 */\n    /* if(disable_deblocking_filter_idc!=1) */\n    int slice_alpha_c0_offset_div2; /* se(v), range -6..6, default 0 */\n    int slice_beta_offset_div_2; /* se(v), range -6..6, default 0 */\n    /* if(num_slice_groups_minus1>0 && slice_group_map_type>=3 && slice_group_map_type<=5)*/\n    uint    slice_group_change_cycle;   /* u(v), use ceil(log2(PicSizeInMapUnits/SliceGroupChangeRate + 1)) bits*/\n\n} AVCSliceHeader;\n\n/**\nThis struct contains information about the neighboring pixel.\n@publishedAll\n*/\ntypedef struct tagPixPos\n{\n    int available;\n    int mb_addr;    /* macroblock address of the current pixel, see below */\n    int 
x;      /* x,y positions of current pixel relative to the macroblock mb_addr */\n    int y;\n    int pos_x;  /* x,y positions of current pixel relative to the picture. */\n    int pos_y;\n} AVCPixelPos;\n\ntypedef struct tagNeighborAvailability\n{\n    int left;\n    int top;    /* macroblock address of the current pixel, see below */\n    int top_right;      /* x,y positions of current pixel relative to the macroblock mb_addr */\n} AVCNeighborAvailability;\n\n\n/**\nThis structure contains picture data and related information necessary to be used as\nreference frame.\n@publishedAll\n*/\ntypedef struct tagPictureData\n{\n    uint16 RefIdx;  /* index used for reference frame */\n    uint8 *Sl;   /* derived from base_dpb in AVCFrameStore */\n    uint8 *Scb;  /* for complementary fields, YUV are interlaced */\n    uint8 *Scr;  /* Sl of top_field and bottom_fields will be one line apart and the\n                    stride will be 2 times the width. */\n    /* For non-complementary field, the above still applies. A special\n       output formatting is required. 
*/\n\n    /* Then, necessary variables that need to be stored */\n    AVCPictureType  picType; /* frame, top-field or bot-field */\n    /*bool*/\n    uint    isReference;\n    /*bool*/\n    uint    isLongTerm;\n    int     PicOrderCnt;\n    int     PicNum;\n    int     LongTermPicNum;\n\n    int     width; /* how many pixel per line */\n    int     height;/* how many line */\n    int     pitch; /* how many pixel between the line */\n\n    uint    padded; /* flag for being padded */\n\n} AVCPictureData;\n\n/**\nThis structure contains information for frame storage.\n@publishedAll\n*/\ntypedef struct tagFrameStore\n{\n    uint8 *base_dpb;    /* base pointer for the YCbCr */\n\n    int     IsReference; /*  0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */\n    int     IsLongTerm;  /*  0=not used for ref; 1=top used; 2=bottom used; 3=both fields (or frame) used */\n    /* if IsLongTerm is true, IsReference can be ignored. */\n    /* if IsReference is true, IsLongterm will be checked for short-term or long-term. */\n    /* IsUsed must be true to enable the validity of IsReference and IsLongTerm */\n\n    int     IsOutputted;  /* has it been outputted via AVCDecGetOutput API, then don't output it again,\n                            wait until it is returned. */\n    AVCPictureData frame;\n\n    int     FrameNum;\n    int     FrameNumWrap;\n    int     LongTermFrameIdx;\n    int     PicOrderCnt; /* of the frame, smaller of the 2 fields */\n\n} AVCFrameStore;\n\n/**\nThis structure maintains the actual memory for the decoded picture buffer (DPB) which is\nallocated at the beginning according to profile/level.\nOnce decoded_picture_buffer is allocated, Sl,Scb,Scr in\nAVCPictureData structure just point to the address in decoded_picture_buffer.\nused_size maintains the used space.\nNOTE:: In order to maintain contiguous memory space, memory equal to a single frame is\nassigned at a time. 
Two opposite fields reside in the same frame memory.\n\n  |-------|---|---|---|xxx|-------|xxx|---|-------|   decoded_picture_buffer\n    frame  top bot top      frame      bot  frame\n      0     1   1   2         3         4     5\n\n  bot 2 and top 4 do not exist, the memory is not used.\n\n@publishedAll\n*/\ntypedef struct tagDecPicBuffer\n{\n    uint8 *decoded_picture_buffer;  /* actual memory */\n    uint32  dpb_size;       /* size of dpb in bytes */\n    uint32  used_size;  /* used size */\n    struct tagFrameStore    *fs[MAX_FS]; /* list of frame stored, actual buffer */\n    int     num_fs;  /* size of fs */\n\n} AVCDecPicBuffer;\n\n\n/**\nThis structure contains macroblock related variables.\n@publishedAll\n*/\ntypedef struct tagMacroblock\n{\n    AVCIntraChromaPredMode  intra_chroma_pred_mode;  /* ue(v) */\n\n    int32 mvL0[16];  /* motion vectors, 16 bit packed (x,y) per element  */\n    int32 mvL1[16];\n    int16 ref_idx_L0[4];\n    int16 ref_idx_L1[4];\n    uint16 RefIdx[4]; /* ref index, has value of AVCPictureData->RefIdx */\n    /* stored data */\n    /*bool*/\n    uint    mb_intra; /* intra flag */\n    /*bool*/\n    uint    mb_bottom_field;\n\n    AVCMBMode mbMode;   /* type of MB prediction */\n    AVCSubMBMode subMbMode[4]; /* for each 8x8 partition */\n\n    uint    CBP; /* CodeBlockPattern */\n    AVCIntra16x16PredMode i16Mode; /* Intra16x16PredMode */\n    AVCIntra4x4PredMode i4Mode[16]; /* Intra4x4PredMode, in raster scan order */\n    int NumMbPart; /* number of partition */\n    AVCPredMode MBPartPredMode[4][4]; /* prediction mode [MBPartIndx][subMBPartIndx] */\n    int MbPartWidth;\n    int MbPartHeight;\n    int NumSubMbPart[4];  /* for each 8x8 partition */\n    int SubMbPartWidth[4];  /* for each 8x8 partition */\n    int SubMbPartHeight[4]; /* for each 8x8 partition */\n\n    uint8 nz_coeff[NUM_BLKS_IN_MB];  /* [blk_y][blk_x], Chroma is [4..5][0...3], see predict_nnz() function */\n\n    int QPy; /* Luma QP */\n    int QPc; /* Chroma 
QP */\n    int QSc; /* Chroma QP S-picture */\n\n    int slice_id;           // MC slice\n} AVCMacroblock;\n\n\n/**\nThis structure contains common internal variables between the encoder and decoder\nsuch that some functions can be shared among them.\n@publishedAll\n*/\ntypedef struct tagCommonObj\n{\n    /* put these 2 up here to make sure they are word-aligned */\n    int16   block[NUM_PIXELS_IN_MB]; /* for transformed residue coefficient */\n    uint8   *pred_block;    /* pointer to prediction block, could point to a frame */\n#ifdef USE_PRED_BLOCK\n    uint8   pred[688];  /* for prediction */\n    /* Luma [0-399], Cb [400-543], Cr[544-687] */\n#endif\n    int     pred_pitch; /* either equal to 20 or to frame pitch */\n\n    /* temporary buffers for intra prediction */\n    /* these variables should remain inside fast RAM */\n#ifdef MB_BASED_DEBLOCK\n    uint8   *intra_pred_top; /* a row of pixel for intra prediction */\n    uint8   intra_pred_left[17]; /* a column of pixel for intra prediction */\n    uint8   *intra_pred_top_cb;\n    uint8   intra_pred_left_cb[9];\n    uint8   *intra_pred_top_cr;\n    uint8   intra_pred_left_cr[9];\n#endif\n    /* pointer to the prediction area for intra prediction */\n    uint8   *pintra_pred_top;   /* pointer to the top intra prediction value */\n    uint8   *pintra_pred_left;  /* pointer to the left intra prediction value */\n    uint8   intra_pred_topleft; /* the [-1,-1] neighboring pixel */\n    uint8   *pintra_pred_top_cb;\n    uint8   *pintra_pred_left_cb;\n    uint8   intra_pred_topleft_cb;\n    uint8   *pintra_pred_top_cr;\n    uint8   *pintra_pred_left_cr;\n    uint8   intra_pred_topleft_cr;\n\n    int QPy;\n    int QPc;\n    int QPy_div_6;\n    int QPy_mod_6;\n    int QPc_div_6;\n    int QPc_mod_6;\n    /**** nal_unit ******/\n    /* previously in AVCNALUnit format */\n    uint    NumBytesInRBSP;\n    int     forbidden_bit;\n    int     nal_ref_idc;\n    AVCNalUnitType  nal_unit_type;\n    AVCNalUnitType  
prev_nal_unit_type;\n    /*bool*/\n    uint    slice_data_partitioning; /* flag when nal_unit_type is between 2 and 4 */\n    /**** ******** ******/\n    AVCSliceType slice_type;\n    AVCDecPicBuffer     *decPicBuf; /* decoded picture buffer */\n\n    AVCSeqParamSet *currSeqParams; /*  the currently used one */\n\n    AVCPicParamSet  *currPicParams; /* the currently used one */\n    uint        seq_parameter_set_id;\n    /* slice header */\n    AVCSliceHeader *sliceHdr;   /* slice header param syntax variables */\n\n    AVCPictureData  *currPic; /* pointer to current picture */\n    AVCFrameStore   *currFS;  /* pointer to current frame store */\n    AVCPictureType  currPicType; /* frame, top-field or bot-field */\n    /*bool*/\n    uint    newPic; /* flag for new picture */\n    uint            newSlice; /* flag for new slice */\n    AVCPictureData  *prevRefPic; /* pointer to previous picture */\n\n    AVCMacroblock   *mblock; /* array of macroblocks covering entire picture */\n    AVCMacroblock   *currMB; /* pointer to current macroblock */\n    uint                    mbNum; /* number of current MB */\n    int                 mb_x;  /* x-coordinate of the current mbNum */\n    int                 mb_y;  /* y-coordinate of the current mbNum */\n\n    /* For internal operation, scratch memory for MV, prediction, transform, etc.*/\n    uint32 cbp4x4; /* each bit represent nonzero 4x4 block in reverse raster scan order */\n    /* starting from luma, Cb and Cr, lsb toward msb */\n    int mvd_l0[4][4][2]; /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */\n    int mvd_l1[4][4][2]; /* [mbPartIdx][subMbPartIdx][compIdx], se(v) */\n\n    int mbAddrA, mbAddrB, mbAddrC, mbAddrD; /* address of neighboring MBs */\n    /*bool*/\n    uint    mbAvailA, mbAvailB, mbAvailC, mbAvailD; /* availability */\n    /*bool*/\n    uint    intraAvailA, intraAvailB, intraAvailC, intraAvailD; /* for intra mode */\n    /***********************************************/\n    /* The following 
variables are defined in the draft. */\n    /* They may need to be stored in PictureData structure and used for reference. */\n    /* In that case, just move or copy it to AVCDecPictureData structure. */\n\n    int     padded_size;    /* size of extra padding to a frame */\n\n    uint    MaxFrameNum;    /*2^(log2_max_frame_num_minus4+4), range 0.. 2^16-1 */\n    uint    MaxPicOrderCntLsb; /*2^(log2_max_pic_order_cnt_lsb_minus4+4), 0..2^16-1 */\n    uint    PicWidthInMbs;  /*pic_width_in_mbs_minus1+1 */\n    uint    PicWidthInSamplesL; /* PicWidthInMbs*16 */\n    uint    PicWidthInSamplesC; /* PicWidthInMbs*8 */\n    uint    PicHeightInMapUnits; /* pic_height_in_map_units_minus1+1 */\n    uint    PicSizeInMapUnits;  /* PicWidthInMbs*PicHeightInMapUnits */\n    uint    FrameHeightInMbs;   /*(2-frame_mbs_only_flag)*PicHeightInMapUnits */\n\n    uint    SliceGroupChangeRate; /* slice_group_change_rate_minus1 + 1 */\n\n    /* access unit */\n    uint    primary_pic_type;   /* u(3), Table 7-2, kinda informative only */\n\n    /* slice data partition */\n    uint    slice_id;           /* ue(v) */\n\n    uint    UnusedShortTermFrameNum;\n    uint    PrevRefFrameNum;\n    uint    MbaffFrameFlag; /* (mb_adaptive_frame_field_flag && !field_pic_flag) */\n    uint    PicHeightInMbs; /* FrameHeightInMbs/(1+field_pic_flag) */\n    int     PicHeightInSamplesL; /* PicHeightInMbs*16 */\n    int     PicHeightInSamplesC; /* PicHeightInMbs*8 */\n    uint    PicSizeInMbs;   /* PicWidthInMbs*PicHeightInMbs */\n    uint    level_idc;\n    int     numMBs;\n    uint    MaxPicNum;\n    uint    CurrPicNum;\n    int     QSy;    /* 26+pic_init_qp_minus26+slice_qs_delta */\n    int     FilterOffsetA;\n    int     FilterOffsetB;\n    uint    MapUnitsInSliceGroup0;  /* Min(slice_group_change_cycle*SliceGroupChangeRate,PicSizeInMapUnits) */\n    /* dec_ref_pic_marking */\n    int     MaxLongTermFrameIdx;\n    int     LongTermFrameIdx;\n\n    /* POC related variables */\n    /*bool*/\n    uint    
mem_mgr_ctrl_eq_5;  /* if memory_management_control_operation equal to 5 flag */\n    int     PicOrderCnt;\n    int     BottomFieldOrderCnt, TopFieldOrderCnt;\n    /* POC mode 0 */\n    int     prevPicOrderCntMsb;\n    uint    prevPicOrderCntLsb;\n    int     PicOrderCntMsb;\n    /* POC mode 1 */\n    int     prevFrameNumOffset, FrameNumOffset;\n    uint    prevFrameNum;\n    int     absFrameNum;\n    int     picOrderCntCycleCnt, frameNumInPicOrderCntCycle;\n    int     expectedDeltaPerPicOrderCntCycle;\n    int     expectedPicOrderCnt;\n\n    /* FMO */\n    int *MbToSliceGroupMap;  /* to be re-calculated at the beginning */\n\n    /* ref pic list */\n    AVCPictureData  *RefPicList0[MAX_REF_PIC_LIST]; /* list 0 */\n    AVCPictureData  *RefPicList1[MAX_REF_PIC_LIST]; /* list 1 */\n    AVCFrameStore   *refFrameList0ShortTerm[32];\n    AVCFrameStore   *refFrameList1ShortTerm[32];\n    AVCFrameStore   *refFrameListLongTerm[32];\n    int     refList0Size;\n    int     refList1Size;\n\n    /* slice data semantics*/\n    int mb_skip_run;    /* ue(v) */\n    /*uint  mb_skip_flag;*/ /* ae(v) */\n    /* uint end_of_slice_flag;*//* ae(v) */\n    /***********************************************/\n\n    /* function pointers */\n    int (*is_short_ref)(AVCPictureData *s);\n    int (*is_long_ref)(AVCPictureData *s);\n\n} AVCCommonObj;\n\n/**\nCommonly used constant arrays.\n@publishedAll\n*/\n/**\nZigzag scan from 1-D to 2-D. */\nconst static uint8 ZZ_SCAN[16] = {0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15};\n/* Zigzag scan from 1-D to 2-D output to block[24][16]. 
*/\nconst static uint8 ZZ_SCAN_BLOCK[16] = {0, 1, 16, 32, 17, 2, 3, 18, 33, 48, 49, 34, 19, 35, 50, 51};\n\n/**\nFrom zigzag to raster for luma DC value */\nconst static uint8 ZIGZAG2RASTERDC[16] = {0, 4, 64, 128, 68, 8, 12, 72, 132, 192, 196, 136, 76, 140, 200, 204};\n\n\n/**\nMapping from coding scan block indx to raster scan block index */\nconst static int blkIdx2blkX[16] = {0, 1, 0, 1, 2, 3, 2, 3, 0, 1, 0, 1, 2, 3, 2, 3};\nconst static int blkIdx2blkY[16] = {0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 2, 2, 3, 3};\n/** from [blk8indx][blk4indx] to raster scan index */\nconst static int blkIdx2blkXY[4][4] = {{0, 1, 4, 5}, {2, 3, 6, 7}, {8, 9, 12, 13}, {10, 11, 14, 15}};\n\n/*\nAvailability of the neighboring top-right block relative to the current block. */\nconst static int BlkTopRight[16] = {2, 2, 2, 3, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0};\n\n/**\nTable 8-13 Specification of QPc as a function of qPI. */\nconst static uint8 mapQPi2QPc[52] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n                                     21, 22, 23, 24, 25, 26, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 34, 35, 35, 36, 36,\n                                     37, 37, 37, 38, 38, 38, 39, 39, 39, 39\n                                    };\n\n/**\nSee 8.5.5 equation (8-252 and 8-253) the definition of v matrix. */\n/* in zigzag scan */\nconst static int dequant_coefres[6][16] =\n{\n    {10, 13, 13, 10, 16, 10, 13, 13, 13, 13, 16, 10, 16, 13, 13, 16},\n    {11, 14, 14, 11, 18, 11, 14, 14, 14, 14, 18, 11, 18, 14, 14, 18},\n    {13, 16, 16, 13, 20, 13, 16, 16, 16, 16, 20, 13, 20, 16, 16, 20},\n    {14, 18, 18, 14, 23, 14, 18, 18, 18, 18, 23, 14, 23, 18, 18, 23},\n    {16, 20, 20, 16, 25, 16, 20, 20, 20, 20, 25, 16, 25, 20, 20, 25},\n    {18, 23, 23, 18, 29, 18, 23, 23, 23, 23, 29, 18, 29, 23, 23, 29}\n};\n\n/**\nFrom jm7.6 block.c. 
(in zigzag scan) */\nconst static int quant_coef[6][16] =\n{\n    {13107, 8066,   8066,   13107,  5243,   13107,  8066,   8066,   8066,   8066,   5243,   13107,  5243,   8066,   8066,   5243},\n    {11916, 7490,   7490,   11916,  4660,   11916,  7490,   7490,   7490,   7490,   4660,   11916,  4660,   7490,   7490,   4660},\n    {10082, 6554,   6554,   10082,  4194,   10082,  6554,   6554,   6554,   6554,   4194,   10082,  4194,   6554,   6554,   4194},\n    {9362,  5825,   5825,   9362,   3647,   9362,   5825,   5825,   5825,   5825,   3647,   9362,   3647,   5825,   5825,   3647},\n    {8192,  5243,   5243,   8192,   3355,   8192,   5243,   5243,   5243,   5243,   3355,   8192,   3355,   5243,   5243,   3355},\n    {7282,  4559,   4559,   7282,   2893,   7282,   4559,   4559,   4559,   4559,   2893,   7282,   2893,   4559,   4559,   2893}\n};\n\n/**\nConvert scan from raster scan order to block decoding order and\nfrom block decoding order to raster scan order. Same table!!!\n*/\nconst static uint8 ras2dec[16] = {0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15};\n\n/* mapping from level_idc to index map */\nconst static uint8 mapLev2Idx[61] = {255, 255, 255, 255, 255, 255, 255, 255, 255, 1,\n                                     0, 1, 2, 3, 255, 255, 255, 255, 255, 255,\n                                     4, 5, 6, 255, 255, 255, 255, 255, 255, 255,\n                                     7, 8, 9, 255, 255, 255, 255, 255, 255, 255,\n                                     10, 11, 12, 255, 255, 255, 255, 255, 255, 255,\n                                     13, 14, 255, 255, 255, 255, 255, 255, 255, 255\n                                    };\n/* map back from index to Level IDC */\nconst static uint8 mapIdx2Lev[MAX_LEVEL_IDX] = {10, 11, 12, 13, 20, 21, 22, 30, 31, 32, 40, 41, 42, 50, 51};\n\n/**\nfrom the index map to the MaxDPB value times 2 */\nconst static int32 MaxDPBX2[MAX_LEVEL_IDX] = {297, 675, 1782, 1782, 1782, 3564, 6075, 6075,\n        13500, 15360, 
24576, 24576, 24576, 82620, 138240\n                                             };\n\n/* map index to the max frame size */\nconst static int MaxFS[MAX_LEVEL_IDX] = {99, 396, 396, 396, 396, 792, 1620, 1620, 3600, 5120,\n                                        8192, 8192, 8192, 22080, 36864\n                                        };\n\n/* map index to max MB processing rate */\nconst static int32 MaxMBPS[MAX_LEVEL_IDX] = {1485, 3000, 6000, 11880, 11880, 19800, 20250, 40500,\n        108000, 216000, 245760, 245760, 491520, 589824, 983040\n                                            };\n\n/* map index to max video bit rate */\nconst static uint32 MaxBR[MAX_LEVEL_IDX] = {64, 192, 384, 768, 2000, 4000, 4000, 10000, 14000, 20000,\n        20000, 50000, 50000, 135000, 240000\n                                           };\n\n/* map index to max CPB size */\nconst static uint32 MaxCPB[MAX_LEVEL_IDX] = {175, 500, 1000, 2000, 2000, 4000, 4000, 10000, 14000,\n        20000, 25000, 62500, 62500, 135000, 240000\n                                            };\n\n/* map index to max vertical MV range */\nconst static int MaxVmvR[MAX_LEVEL_IDX] = {64, 128, 128, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512, 512};\n\n#endif /*  _AVCINT_COMMON_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/include/avclib_common.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains declarations of internal functions for common encoder/decoder library.\n@publishedAll\n*/\n#ifndef AVCCOMMON_LIB_H_INCLUDED\n#define AVCCOMMON_LIB_H_INCLUDED\n\n#ifndef AVCINT_COMMON_H_INCLUDED\n#include \"avcint_common.h\"\n#endif\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n/*----------- deblock.c --------------*/\n/**\nThis function performs conditional deblocking on a complete picture.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS for success and AVC_FAIL otherwise.\"\n*/\nOSCL_IMPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video);\n\n/**\nThis function performs MB-based deblocking when MB_BASED_DEBLOCK\nis defined at compile time.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS for success and AVC_FAIL otherwise.\"\n*/\nvoid MBInLoopDeblock(AVCCommonObj *video);\n\n\n/*---------- dpb.c --------------------*/\n/**\nThis function is called everytime a new sequence is detected.\n\\param \"avcHandle\"  \"Pointer to AVCHandle.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"padding\"    \"Flag specifying whether padding in luma component is needed (used for encoding).\"\n\\return 
\"AVC_SUCCESS or AVC_FAIL.\"\n*/\nOSCL_IMPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding);\n\n/**\nThis function allocates and initializes the decoded picture buffer structure based on\nthe profile and level for the first sequence parameter set. Currently,\nit does not allow changing in profile/level for subsequent SPS.\n\\param \"avcHandle\"  \"Pointer to AVCHandle.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"FrameHeightInMbs\"   \"Height of the frame in the unit of MBs.\"\n\\param \"PicWidthInMbs\"  \"Width of the picture in the unit of MBs.\"\n\\param \"padding\"    \"Flag specifying whether padding in luma component is needed (used for encoding).\"\n\\return \"AVC_SUCCESS or AVC_FAIL.\"\n*/\nAVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding);\n\n/**\nThis function frees the DPB memory.\n\\param \"avcHandle\"  \"Pointer to AVCHandle.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS or AVC_FAIL.\"\n*/\nOSCL_IMPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video);\n\n/**\nThis function finds empty frame in the decoded picture buffer to be used for the\ncurrent picture, initializes the corresponding picture structure with Sl, Scb, Scr,\nwidth, height and pitch.\n\\param \"avcHandle\" \"Pointer to the main handle object.\"\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS or AVC_FAIL.\"\n*/\nOSCL_IMPORT_REF AVCStatus DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video);\n/**\nThis function finds empty frame in the decoded picture buffer to be used for the\ncurrent picture, initializes the corresponding picture structure with Sl, Scb, Scr,\nwidth, height and pitch.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"CurrPicNum\" \"Current picture number (only used in decoder).\"\n\\return \"AVC_SUCCESS or AVC_FAIL.\"\n*/\n\nOSCL_IMPORT_REF void 
DPBInitPic(AVCCommonObj *video, int CurrPicNum);\n\n/**\nThis function releases the current frame back to the available pool for skipped frame after encoding.\n\\param \"avcHandle\" \"Pointer to the main handle object.\"\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\return \"void.\"\n*/\nOSCL_IMPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video);\n\n/**\nThis function performs decoded reference picture marking process and store the current picture to the\ncorresponding frame storage in the decoded picture buffer.\n\\param \"avcHandle\" \"Pointer to the main handle object.\"\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\return \"AVC_SUCCESS or AVC_FAIL.\"\n*/\nOSCL_IMPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video);\n\n/**\nThis function perform sliding window operation on the reference picture lists, see subclause 8.2.5.3.\nIt removes short-term ref frames with smallest FrameNumWrap from the reference list.\n\\param \"avcHandle\" \"Pointer to the main handle object.\"\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\return \"AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft).\"\n*/\nAVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb);\n\n\n/**\nThis function perform adaptive memory marking operation on the reference picture lists,\nsee subclause 8.2.5.4. 
It calls other functions for specific operations.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"sliceHdr\"   \"Pointer to the AVCSliceHeader.\"\n\\return \"AVC_SUCCESS or AVC_FAIL (contradicting values or scenario as in the Note in the draft).\"\n*/\nAVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr);\n\n/**\nThis function performs memory management control operation 1, marking a short-term picture\nas unused for reference. See subclause 8.2.5.4.1.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"difference_of_pic_nums_minus1\"  \"From the syntax in dec_ref_pic_marking().\"\n*/\nvoid MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1);\n\n/**\nThis function performs memory management control operation 2, marking a long-term picture\nas unused for reference. See subclause 8.2.5.4.2.\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"field_pic_flag\"  \"Flag whether the current picture is field or not.\"\n\\param \"long_term_pic_num\"  \"From the syntax in dec_ref_pic_marking().\"\n*/\nvoid MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num);\n\n/**\nThis function performs memory management control operation 3, assigning a LongTermFrameIdx to\na short-term reference picture. 
See subclause 8.2.5.4.3.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"difference_of_pic_nums_minus1\"  \"From the syntax in dec_ref_pic_marking().\"\n\\param \"long_term_pic_num\"  \"From the syntax in dec_ref_pic_marking().\"\n*/\nvoid MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1,\n                   uint long_term_frame_idx);\n\n/**\nThis function performs memory management control operation 4, getting new MaxLongTermFrameIdx.\n See subclause 8.2.5.4.4.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"max_long_term_frame_idx_plus1\"  \"From the syntax in dec_ref_pic_marking().\"\n*/\nvoid MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1);\n\n/**\nThis function performs memory management control operation 5, marking all reference pictures\nas unused for reference and set MaxLongTermFrameIdx to no long-termframe indices.\n See subclause 8.2.5.4.5.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n*/\nvoid MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb);\n\n/**\nThis function performs memory management control operation 6, assigning a long-term frame index\nto the current picture. 
See subclause 8.2.5.4.6.\n\\param \"video\" \"Pointer to the AVCCommonObj.\"\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"long_term_frame_idx\"  \"From the syntax in dec_ref_pic_marking().\"\n*/\nvoid MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx);\n\n/**\nThis function mark a long-term ref frame with a specific frame index as unused for reference.\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"long_term_frame_idx\"  \"To look for\"\n*/\nvoid unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx);\n\n/**\nThis function mark a long-term ref field with a specific frame index as unused for reference except\na frame that contains a picture with picNumX.\n\\param \"dpb\"  \"Pointer to the AVCDecPicBuffer.\"\n\\param \"long_term_frame_idx\"  \"To look for.\"\n\\param \"picNumX\"    \"To look for.\"\n*/\nvoid unmark_long_term_field_for_reference_by_frame_idx(AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_indx, int picNumX);\n\n/**\nThis function mark a frame to unused for reference.\n\\param \"fs\" \"Pointer to AVCFrameStore to be unmarked.\"\n*/\nvoid unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx);\n\nvoid update_ref_list(AVCDecPicBuffer *dpb);\n\n\n/*---------- fmo.c --------------*/\n/**\nThis function initializes flexible macroblock reordering.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS for success and AVC_FAIL otherwise.\"\n*/\nOSCL_IMPORT_REF AVCStatus FMOInit(AVCCommonObj *video);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing the interleaved slice group map type.\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"run_length_minus1\"  \"Array of the run-length.\"\n\\param \"num_slice_groups_minus_1\"   \"Number of slice group minus 1.\"\n\\param 
\"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"Void.\"\n*/\nvoid FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing the dispersed slice group map type.\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"PicWidthInMbs\"  \"Width of the luma picture in macroblock unit.\"\n\\param \"num_slice_groups_minus_1\"   \"Number of slice group minus 1.\"\n\\param \"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"Void.\"\n*/\nvoid FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing the foreground with left-over slice group map type.\n\\param \"pps\"    \"Pointer to AVCPicParamSets structure.\"\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"PicWidthInMbs\"  \"Width of the luma picture in macroblock unit.\"\n\\param \"num_slice_groups_minus_1\"   \"Number of slice group minus 1.\"\n\\param \"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"Void.\"\n*/\nvoid FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs,\n                                uint num_slice_groups_minus1, uint PicSizeInMapUnits);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing the box-out slice group map type.\n\\param \"pps\"    \"Pointer to AVCPicParamSets structure.\"\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"PicWidthInMbs\"  \"Width of the luma picture in macroblock unit.\"\n\\return \"Void.\"\n*/\nvoid FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap,\n  
                              int PicWidthInMbs);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing the raster scan slice group map type.\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"MapUnitsInSliceGroup0\"  \"Derived in subclause 7.4.3.\"\n\\param \"slice_group_change_direction_flag\"  \"A value from the slice header.\"\n\\param \"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"void\"\n*/\nvoid FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0,\n                                int slice_group_change_direction_flag, uint PicSizeInMapUnits);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing wipe slice group map type.\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"video\"  \"Pointer to AVCCommonObj structure.\"\n\\param \"slice_group_change_direction_flag\"  \"A value from the slice header.\"\n\\param \"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"void\"\n*/\nvoid FmoGenerateType5MapUnitMap(int *mapUnitsToSliceGroupMap, AVCCommonObj *video,\n                                int slice_group_change_direction_flag, uint PicSizeInMapUnits);\n\n/**\nThis function fills up an array that maps Map unit to the slice group\nfollowing wipe slice group map type.\n\\param \"mapUnitToSliceGroupMap\" \"Array of slice group mapping.\"\n\\param \"slice_group_id\" \"Array of slice_group_id from AVCPicParamSet structure.\"\n\\param \"PicSizeInMapUnit\"   \"Size of the picture in number Map units.\"\n\\return \"void\"\n*/\nvoid FmoGenerateType6MapUnitMap(int *mapUnitsToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits);\n\n/*------------- itrans.c --------------*/\n/**\nThis function performs transformation of the Intra16x16DC value according to\nsubclause 8.5.6.\n\\param \"block\"  \"Pointer to the video->block[0][0][0].\"\n\\param \"QPy\" 
   \"Quantization parameter.\"\n\\return \"void.\"\n*/\nvoid Intra16DCTrans(int16 *block, int Qq, int Rq);\n\n/**\nThis function performs transformation of a 4x4 block according to\nsubclause 8.5.8.\n\\param \"block\"  \"Pointer to the origin of transform coefficient area.\"\n\\param \"pred\"   \"Pointer to the origin of predicted area.\"\n\\param \"cur\"    \"Pointer to the origin of the output area.\"\n\\param \"width\"  \"Pitch of cur.\"\n\\return \"void.\"\n*/\nvoid itrans(int16 *block, uint8 *pred, uint8 *cur, int width);\n\n/*\nThis function is the same one as itrans except for chroma.\n\\param \"block\"  \"Pointer to the origin of transform coefficient area.\"\n\\param \"pred\"   \"Pointer to the origin of predicted area.\"\n\\param \"cur\"    \"Pointer to the origin of the output area.\"\n\\param \"width\"  \"Pitch of cur.\"\n\\return \"void.\"\n*/\nvoid ictrans(int16 *block, uint8 *pred, uint8 *cur, int width);\n\n/**\nThis function performs transformation of the DCChroma value according to\nsubclause 8.5.7.\n\\param \"block\"  \"Pointer to the video->block[0][0][0].\"\n\\param \"QPc\"    \"Quantization parameter.\"\n\\return \"void.\"\n*/\nvoid ChromaDCTrans(int16 *block, int Qq, int Rq);\n\n/**\nThis function copies a block from pred to cur.\n\\param \"pred\"   \"Pointer to prediction block.\"\n\\param \"cur\"    \"Pointer to the current YUV block.\"\n\\param \"width\"  \"Pitch of cur memory.\"\n\\param \"pred_pitch\" \"Pitch for pred memory.\n\\return \"void.\"\n*/\nvoid copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch);\n\n/*--------- mb_access.c ----------------*/\n/**\nThis function initializes the neighboring information before start macroblock decoding.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"mbNum\"  \"The current macroblock index.\"\n\\param \"currMB\" \"Pointer to the current AVCMacroblock structure.\"\n\\return \"void\"\n*/\nOSCL_IMPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int 
mbNum);\n\n/**\nThis function checks whether the requested neighboring macroblock is available.\n\\param \"MbToSliceGroupMap\"  \"Array containing the slice group ID mapping to MB index.\"\n\\param \"PicSizeInMbs\"   \"Size of the picture in number of MBs.\"\n\\param \"mbAddr\"     \"Neighboring macroblock index to check.\"\n\\param \"currMbAddr\" \"Current macroblock index.\"\n\\return \"TRUE if the neighboring MB is available, FALSE otherwise.\"\n*/\nbool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr);\n\n/**\nThis function performs prediction of the nonzero coefficient for a luma block (i,j).\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"i\"  \"Block index, horizontal.\"\n\\param \"j\"  \"Block index, vertical.\"\n\\return \"Predicted number of nonzero coefficient.\"\n*/\nOSCL_IMPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j);\n\n/**\nThis function performs prediction of the nonzero coefficient for a chroma block (i,j).\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"i\"  \"Block index, horizontal.\"\n\\param \"j\"  \"Block index, vertical.\"\n\\return \"Predicted number of nonzero coefficient.\"\n*/\nOSCL_IMPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j);\n\n/**\nThis function calculates the predicted motion vectors for the current macroblock.\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"encFlag\"    \"Boolean whether this function is used by encoder or decoder.\"\n\\return \"void.\"\n*/\nOSCL_IMPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag);\n\n/*---------- reflist.c -----------------*/\n/**\nThis function initializes reference picture list used in INTER prediction\nat the beginning of each slice decoding. 
See subclause 8.2.4.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"void\"\nOutput is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size.\n*/\nOSCL_IMPORT_REF void RefListInit(AVCCommonObj *video);\n\n/**\nThis function generates picture list from frame list. Used when current picture is field.\nsee subclause 8.2.4.2.5.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"IsL1\"   \"Is L1 list?\"\n\\param \"long_term\"  \"Is long-term prediction?\"\n\\return \"void\"\n*/\nvoid    GenPicListFromFrameList(AVCCommonObj *video, int IsL1, int long_term);\n\n/**\nThis function performs reference picture list reordering according to the\nref_pic_list_reordering() syntax. See subclause 8.2.4.3.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVC_SUCCESS or AVC_FAIL\"\nOutput is video->RefPicList0, video->RefPicList1, video->refList0Size and video->refList1Size.\n*/\nOSCL_IMPORT_REF AVCStatus ReOrderList(AVCCommonObj *video);\n\n/**\nThis function performs reference picture list reordering according to the\nref_pic_list_reordering() syntax regardless of list 0 or list 1. 
See subclause 8.2.4.3.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"isL1\"   \"Is list 1 or not.\"\n\\return \"AVC_SUCCESS or AVC_FAIL\"\nOutput is video->RefPicList0 and video->refList0Size or video->RefPicList1 and video->refList1Size.\n*/\nAVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1);\n\n/**\nThis function performs reordering process of reference picture list for short-term pictures.\nSee subclause 8.2.4.3.1.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"picNumLX\"   \"picNumLX of an entry in the reference list.\"\n\\param \"refIdxLX\"   \"Pointer to the current entry index in the reference.\"\n\\param \"isL1\"       \"Is list 1 or not.\"\n\\return \"AVC_SUCCESS or AVC_FAIL\"\n*/\nAVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1);\n\n/**\nThis function performs reordering process of reference picture list for long-term pictures.\nSee subclause 8.2.4.3.2.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"LongTermPicNum\" \"LongTermPicNum of an entry in the reference list.\"\n\\param \"refIdxLX\"   \"Pointer to the current entry index in the reference.\"\n\\param \"isL1\"       \"Is list 1 or not.\"\n\\return \"AVC_SUCCESS or AVC_FAIL\"\n*/\nAVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1);\n\n/**\nThis function gets the pictures in DPB according to the PicNum.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"picNum\" \"PicNum of the picture we are looking for.\"\n\\return \"Pointer to the AVCPictureData or NULL if not found\"\n*/\nAVCPictureData*  GetShortTermPic(AVCCommonObj *video, int picNum);\n\n/**\nThis function gets the pictures in DPB according to the LongtermPicNum.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"LongtermPicNum\" \"LongtermPicNum of the picture we are looking for.\"\n\\return \"Pointer to the AVCPictureData.\"\n*/\nAVCPictureData*  GetLongTermPic(AVCCommonObj *video, int 
LongtermPicNum);\n\n/**\nThis function indicates whether the picture is used for short-term reference or not.\n\\param \"s\"  \"Pointer to AVCPictureData.\"\n\\return \"1 if it is used for short-term, 0 otherwise.\"\n*/\nint is_short_ref(AVCPictureData *s);\n\n/**\nThis function indicates whether the picture is used for long-term reference or not.\n\\param \"s\"  \"Pointer to AVCPictureData.\"\n\\return \"1 if it is used for long-term, 0 otherwise.\"\n*/\nint is_long_ref(AVCPictureData *s);\n\n/**\nThis function sorts array of pointers to AVCPictureData in descending order of\nthe PicNum value.\n\\param \"data\"   \"Array of pointers to AVCPictureData.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortPicByPicNum(AVCPictureData *data[], int num);\n\n/**\nThis function sorts array of pointers to AVCPictureData in ascending order of\nthe PicNum value.\n\\param \"data\"   \"Array of pointers to AVCPictureData.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortPicByPicNumLongTerm(AVCPictureData *data[], int num);\n\n/**\nThis function sorts array of pointers to AVCFrameStore in descending order of\nthe FrameNumWrap value.\n\\param \"data\"   \"Array of pointers to AVCFrameStore.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortFrameByFrameNumWrap(AVCFrameStore *data[], int num);\n\n/**\nThis function sorts array of pointers to AVCFrameStore in ascending order of\nthe LongTermFrameIdx value.\n\\param \"data\"   \"Array of pointers to AVCFrameStore.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortFrameByLTFrameIdx(AVCFrameStore *data[], int num);\n\n/**\nThis function sorts array of pointers to AVCPictureData in descending order of\nthe PicOrderCnt value.\n\\param \"data\"   \"Array of pointers to AVCPictureData.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortPicByPOC(AVCPictureData *data[], int num, int 
descending);\n\n/**\nThis function sorts array of pointers to AVCPictureData in ascending order of\nthe LongTermPicNum value.\n\\param \"data\"   \"Array of pointers to AVCPictureData.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortPicByLTPicNum(AVCPictureData *data[], int num);\n\n/**\nThis function sorts array of pointers to AVCFrameStore in descending order of\nthe PicOrderCnt value.\n\\param \"data\"   \"Array of pointers to AVCFrameStore.\"\n\\param \"num\"    \"Size of the array.\"\n\\return \"void\"\n*/\nvoid SortFrameByPOC(AVCFrameStore *data[], int num, int descending);\n\n\n#endif /* _AVCCOMMON_LIB_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/src/deblock.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n#include \"oscl_mem.h\"\n\n#define MAX_QP 51\n#define MB_BLOCK_SIZE 16\n\n// NOTE: these 3 tables are for funtion GetStrength() only\nconst static int ININT_STRENGTH[4] = {0x04040404, 0x03030303, 0x03030303, 0x03030303};\n\n\n// NOTE: these 3 tables are for funtion EdgeLoop() only\n// NOTE: to change the tables below for instance when the QP doubling is changed from 6 to 8 values\n\nconst static int ALPHA_TABLE[52]  = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 5, 6,  7, 8, 9, 10, 12, 13, 15, 17,  20, 22, 25, 28, 32, 36, 40, 45,  50, 56, 63, 71, 80, 90, 101, 113,  127, 144, 162, 182, 203, 226, 255, 255} ;\nconst static int BETA_TABLE[52]   = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 3,  3, 3, 3, 4, 4, 4, 6, 6,   7, 7, 8, 8, 9, 9, 10, 10,  11, 11, 12, 12, 13, 13, 14, 14,   15, 15, 16, 16, 17, 17, 18, 18} ;\nconst static int CLIP_TAB[52][5]  =\n{\n    { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0},\n    { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0},\n    
{ 0, 0, 0, 0, 0}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 0, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 0, 1, 1, 1}, { 0, 1, 1, 1, 1},\n    { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 1, 1}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 1, 2, 2}, { 0, 1, 2, 3, 3},\n    { 0, 1, 2, 3, 3}, { 0, 2, 2, 3, 3}, { 0, 2, 2, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 2, 3, 4, 4}, { 0, 3, 3, 5, 5}, { 0, 3, 4, 6, 6}, { 0, 3, 4, 6, 6},\n    { 0, 4, 5, 7, 7}, { 0, 4, 5, 8, 8}, { 0, 4, 6, 9, 9}, { 0, 5, 7, 10, 10}, { 0, 6, 8, 11, 11}, { 0, 6, 8, 13, 13}, { 0, 7, 10, 14, 14}, { 0, 8, 11, 16, 16},\n    { 0, 9, 12, 18, 18}, { 0, 10, 13, 20, 20}, { 0, 11, 15, 23, 23}, { 0, 13, 17, 25, 25}\n};\n\n// NOTE: this table is only QP clipping, index = QP + video->FilterOffsetA/B, clipped to [0, 51]\n//       video->FilterOffsetA/B is in {-12, 12]\nconst static int QP_CLIP_TAB[76] =\n{\n    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,              // [-12, 0]\n    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n    13, 14, 15, 16, 17, 18, 19, 20, 21,\n    22, 23, 24, 25, 26, 27, 28, 29, 30,\n    31, 32, 33, 34, 35, 36, 37, 38, 39,\n    40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, // [1, 51]\n    51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51      // [52,63]\n};\n\nstatic void DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV);\n//static void GetStrength(AVCCommonObj *video, uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir, int edge);\nstatic void GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir);\nstatic void GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ);\nstatic void GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ);\nstatic void EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);\nstatic void EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);\nstatic void 
EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);\nstatic void EdgeLoop_Chroma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch);\n\n/*\n *****************************************************************************************\n * \\brief Filter all macroblocks in order of increasing macroblock address.\n *****************************************************************************************\n*/\n\nOSCL_EXPORT_REF AVCStatus DeblockPicture(AVCCommonObj *video)\n{\n    uint   i, j;\n    int   pitch = video->currPic->pitch, pitch_c, width;\n    uint8 *SrcY, *SrcU, *SrcV;\n\n    SrcY = video->currPic->Sl;      // pointers to source\n    SrcU = video->currPic->Scb;\n    SrcV = video->currPic->Scr;\n    pitch_c = pitch >> 1;\n    width = video->currPic->width;\n\n    for (i = 0; i < video->PicHeightInMbs; i++)\n    {\n        for (j = 0; j < video->PicWidthInMbs; j++)\n        {\n            DeblockMb(video, j, i, SrcY, SrcU, SrcV);\n            // update SrcY, SrcU, SrcV\n            SrcY += MB_BLOCK_SIZE;\n            SrcU += (MB_BLOCK_SIZE >> 1);\n            SrcV += (MB_BLOCK_SIZE >> 1);\n        }\n\n        SrcY += ((pitch << 4) - width);\n        SrcU += ((pitch_c << 3) - (width >> 1));\n        SrcV += ((pitch_c << 3) - (width >> 1));\n    }\n\n    return AVC_SUCCESS;\n}\n\n#ifdef MB_BASED_DEBLOCK\n/*\n *****************************************************************************************\n * \\brief Filter one macroblocks in a fast macroblock memory and copy it to frame\n *****************************************************************************************\n*/\nvoid MBInLoopDeblock(AVCCommonObj *video)\n{\n    AVCPictureData *currPic = video->currPic;\n#ifdef USE_PRED_BLOCK\n    uint8 *predCb, *predCr, *pred_block;\n    int i, j, dst_width, dst_height, dst_widthc, dst_heightc;\n#endif\n    int pitch = currPic->pitch;\n    int x_pos = video->mb_x;\n    int 
y_pos = video->mb_y;\n    uint8 *curL, *curCb, *curCr;\n    int offset;\n\n    offset = (y_pos << 4) * pitch;\n\n    curL = currPic->Sl + offset + (x_pos << 4);\n\n    offset >>= 2;\n    offset += (x_pos << 3);\n\n    curCb = currPic->Scb + offset;\n    curCr = currPic->Scr + offset;\n\n#ifdef USE_PRED_BLOCK\n    pred_block = video->pred;\n\n    /* 1. copy neighboring pixels from frame to the video->pred_block */\n    if (y_pos) /* not the 0th row */\n    {\n        /* copy to the top 4 lines of the macroblock */\n        curL -= (pitch << 2); /* go back 4 lines */\n\n        oscl_memcpy(pred_block + 4, curL, 16);\n        curL += pitch;\n        oscl_memcpy(pred_block + 24, curL, 16);\n        curL += pitch;\n        oscl_memcpy(pred_block + 44, curL, 16);\n        curL += pitch;\n        oscl_memcpy(pred_block + 64, curL, 16);\n        curL += pitch;\n\n        curCb -= (pitch << 1); /* go back 4 lines chroma */\n        curCr -= (pitch << 1);\n\n        pred_block += 400;\n\n        oscl_memcpy(pred_block + 4, curCb, 8);\n        curCb += (pitch >> 1);\n        oscl_memcpy(pred_block + 16, curCb, 8);\n        curCb += (pitch >> 1);\n        oscl_memcpy(pred_block + 28, curCb, 8);\n        curCb += (pitch >> 1);\n        oscl_memcpy(pred_block + 40, curCb, 8);\n        curCb += (pitch >> 1);\n\n        pred_block += 144;\n        oscl_memcpy(pred_block + 4, curCr, 8);\n        curCr += (pitch >> 1);\n        oscl_memcpy(pred_block + 16, curCr, 8);\n        curCr += (pitch >> 1);\n        oscl_memcpy(pred_block + 28, curCr, 8);\n        curCr += (pitch >> 1);\n        oscl_memcpy(pred_block + 40, curCr, 8);\n        curCr += (pitch >> 1);\n\n        pred_block = video->pred;\n    }\n\n    /* 2. perform deblocking. */\n    DeblockMb(video, x_pos, y_pos, pred_block + 84, pred_block + 452, pred_block + 596);\n\n    /* 3. 
copy it back to the frame and update pred_block */\n    predCb = pred_block + 400;\n    predCr = predCb + 144;\n\n    /* find the range of the block inside pred_block to be copied back */\n    if (y_pos)  /* the first row */\n    {\n        curL -= (pitch << 2);\n        curCb -= (pitch << 1);\n        curCr -= (pitch << 1);\n\n        dst_height = 20;\n        dst_heightc = 12;\n    }\n    else\n    {\n        pred_block += 80;\n        predCb += 48;\n        predCr += 48;\n        dst_height = 16;\n        dst_heightc = 8;\n    }\n\n    if (x_pos) /* find the width */\n    {\n        curL -= 4;\n        curCb -= 4;\n        curCr -= 4;\n        if (x_pos == (int)(video->PicWidthInMbs - 1))\n        {\n            dst_width = 20;\n            dst_widthc = 12;\n        }\n        else\n        {\n            dst_width = 16;\n            dst_widthc = 8;\n        }\n    }\n    else\n    {\n        pred_block += 4;\n        predCb += 4;\n        predCr += 4;\n        dst_width = 12;\n        dst_widthc = 4;\n    }\n\n    /* perform copy */\n    for (j = 0; j < dst_height; j++)\n    {\n        oscl_memcpy(curL, pred_block, dst_width);\n        curL += pitch;\n        pred_block += 20;\n    }\n    for (j = 0; j < dst_heightc; j++)\n    {\n        oscl_memcpy(curCb, predCb, dst_widthc);\n        oscl_memcpy(curCr, predCr, dst_widthc);\n        curCb += (pitch >> 1);\n        curCr += (pitch >> 1);\n        predCb += 12;\n        predCr += 12;\n    }\n\n    if (x_pos != (int)(video->PicWidthInMbs - 1)) /* now copy from the right-most 4 columns to the left-most 4 columns */\n    {\n        pred_block = video->pred;\n        for (i = 0; i < 20; i += 4)\n        {\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));\n            pred_block += 20;\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));\n            pred_block += 20;\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 16));\n            pred_block += 20;\n            
*((uint32*)pred_block) = *((uint32*)(pred_block + 16));\n            pred_block += 20;\n        }\n\n        for (i = 0; i < 24; i += 4)\n        {\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));\n            pred_block += 12;\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));\n            pred_block += 12;\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));\n            pred_block += 12;\n            *((uint32*)pred_block) = *((uint32*)(pred_block + 8));\n            pred_block += 12;\n        }\n\n    }\n#else\n    DeblockMb(video, x_pos, y_pos, curL, curCb, curCr);\n#endif\n\n    return ;\n}\n#endif\n\n/*\n *****************************************************************************************\n * \\brief Deblocking filter for one macroblock.\n *****************************************************************************************\n */\n\nvoid DeblockMb(AVCCommonObj *video, int mb_x, int mb_y, uint8 *SrcY, uint8 *SrcU, uint8 *SrcV)\n{\n    AVCMacroblock *MbP, *MbQ;\n    int     edge, QP, QPC;\n    int     filterLeftMbEdgeFlag = (mb_x != 0);\n    int     filterTopMbEdgeFlag  = (mb_y != 0);\n    int     pitch = video->currPic->pitch;\n    int     indexA, indexB, tmp;\n    int     Alpha, Beta, Alpha_c, Beta_c;\n    int     mbNum = mb_y * video->PicWidthInMbs + mb_x;\n    int     *clipTable, *clipTable_c, *qp_clip_tab;\n    uint8   Strength[16];\n    void*     str;\n\n    MbQ = &(video->mblock[mbNum]);      // current Mb\n\n\n    // If filter is disabled, return\n    if (video->sliceHdr->disable_deblocking_filter_idc == 1) return;\n\n    if (video->sliceHdr->disable_deblocking_filter_idc == 2)\n    {\n        // don't filter at slice boundaries\n        filterLeftMbEdgeFlag = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - 1, mbNum);\n        filterTopMbEdgeFlag  = mb_is_available(video->mblock, video->PicSizeInMbs, mbNum - video->PicWidthInMbs, mbNum);\n    }\n\n    /* NOTE: edge=0 and edge=1~3 
are separate cases because of the difference of MbP, index A and indexB calculation */\n    /*       for edge = 1~3, MbP, indexA and indexB remain the same, and thus there is no need to re-calculate them for each edge */\n\n    qp_clip_tab = (int *)QP_CLIP_TAB + 12;\n\n    /* 1.VERTICAL EDGE + MB BOUNDARY (edge = 0) */\n    if (filterLeftMbEdgeFlag)\n    {\n        MbP = MbQ - 1;\n        //GetStrength(video, Strength, MbP, MbQ, 0, 0); // Strength for 4 blks in 1 stripe, 0 => vertical edge\n        GetStrength_Edge0(Strength, MbP, MbQ, 0);\n\n        str = (void*)Strength; //de-ref type-punned pointer fix\n        if (*((uint32*)str))    // only if one of the 4 Strength bytes is != 0\n        {\n            QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks;\n            indexA = QP + video->FilterOffsetA;\n            indexB = QP + video->FilterOffsetB;\n            indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n            indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n            Alpha  = ALPHA_TABLE[indexA];\n            Beta = BETA_TABLE[indexB];\n            clipTable = (int *) CLIP_TAB[indexA];\n\n            if (Alpha > 0 && Beta > 0)\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Luma_vertical(SrcY, Strength,  Alpha, Beta, clipTable, 20);\n#else\n                EdgeLoop_Luma_vertical(SrcY, Strength,  Alpha, Beta, clipTable, pitch);\n#endif\n\n            QPC = (MbP->QPc + MbQ->QPc + 1) >> 1;\n            indexA = QPC + video->FilterOffsetA;\n            indexB = QPC + video->FilterOffsetB;\n            indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n            indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n            Alpha  = ALPHA_TABLE[indexA];\n            Beta = BETA_TABLE[indexB];\n            clipTable = (int *) CLIP_TAB[indexA];\n            if (Alpha > 0 && Beta > 0)\n            {\n#ifdef USE_PRED_BLOCK\n    
            EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, 12);\n                EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, 12);\n#else\n                EdgeLoop_Chroma_vertical(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1);\n                EdgeLoop_Chroma_vertical(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1);\n#endif\n            }\n        }\n\n    } /* end of: if(filterLeftMbEdgeFlag) */\n\n    /* 2.VERTICAL EDGE (no boundary), the edges are all inside a MB */\n    /* First calculate the necesary parameters all at once, outside the loop */\n    MbP = MbQ;\n\n    indexA = MbQ->QPy + video->FilterOffsetA;\n    indexB = MbQ->QPy + video->FilterOffsetB;\n    //  index\n    indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n    indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n    Alpha = ALPHA_TABLE[indexA];\n    Beta = BETA_TABLE[indexB];\n    clipTable = (int *)CLIP_TAB[indexA];\n\n    /* Save Alpha,  Beta and clipTable for future use, with the obselete variables filterLeftMbEdgeFlag, mbNum amd tmp */\n    filterLeftMbEdgeFlag = Alpha;\n    mbNum = Beta;\n    tmp = (int)clipTable;\n\n    indexA = MbQ->QPc + video->FilterOffsetA;\n    indexB = MbQ->QPc + video->FilterOffsetB;\n    indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n    indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n    Alpha_c  = ALPHA_TABLE[indexA];\n    Beta_c = BETA_TABLE[indexB];\n    clipTable_c = (int *)CLIP_TAB[indexA];\n\n    GetStrength_VerticalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, 0 => vertical edge\n\n    for (edge = 1; edge < 4; edge++)  // 4 vertical strips of 16 pel\n    {\n        //GetStrength_VerticalEdges(video, Strength, MbP, MbQ, 0, edge); // Strength for 4 blks in 1 stripe, 0 => vertical edge\n        if (*((int*)(Strength + (edge << 2))))   // only if one of the 4 Strength bytes is != 0\n       
 {\n            if (Alpha > 0 && Beta > 0)\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2),  Alpha, Beta, clipTable, 20);\n#else\n                EdgeLoop_Luma_vertical(SrcY + (edge << 2), Strength + (edge << 2),  Alpha, Beta, clipTable, pitch);\n#endif\n\n            if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0)\n            {\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);\n                EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);\n#else\n                EdgeLoop_Chroma_vertical(SrcU + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);\n                EdgeLoop_Chroma_vertical(SrcV + (edge << 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);\n#endif\n            }\n        }\n\n    } //end edge\n\n\n\n    /* 3.HORIZONTAL EDGE + MB BOUNDARY (edge = 0) */\n    if (filterTopMbEdgeFlag)\n    {\n        MbP = MbQ - video->PicWidthInMbs;\n        //GetStrength(video, Strength, MbP, MbQ, 1, 0); // Strength for 4 blks in 1 stripe, 0 => vertical edge\n        GetStrength_Edge0(Strength, MbP, MbQ, 1);\n        str = (void*)Strength; //de-ref type-punned pointer fix\n        if (*((uint32*)str))    // only if one of the 4 Strength bytes is != 0\n        {\n            QP = (MbP->QPy + MbQ->QPy + 1) >> 1; // Average QP of the two blocks;\n            indexA = QP + video->FilterOffsetA;\n            indexB = QP + video->FilterOffsetB;\n            indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n            indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n            Alpha  = ALPHA_TABLE[indexA];\n            Beta = BETA_TABLE[indexB];\n            clipTable = (int *)CLIP_TAB[indexA];\n\n            if (Alpha > 0 && Beta > 0)\n            {\n#ifdef 
USE_PRED_BLOCK\n                EdgeLoop_Luma_horizontal(SrcY, Strength,  Alpha, Beta, clipTable, 20);\n#else\n                EdgeLoop_Luma_horizontal(SrcY, Strength,  Alpha, Beta, clipTable, pitch);\n#endif\n            }\n\n            QPC = (MbP->QPc + MbQ->QPc + 1) >> 1;\n            indexA = QPC + video->FilterOffsetA;\n            indexB = QPC + video->FilterOffsetB;\n            indexA = qp_clip_tab[indexA]; // IClip(0, MAX_QP, QP+video->FilterOffsetA)\n            indexB = qp_clip_tab[indexB]; // IClip(0, MAX_QP, QP+video->FilterOffsetB)\n\n            Alpha  = ALPHA_TABLE[indexA];\n            Beta = BETA_TABLE[indexB];\n            clipTable = (int *)CLIP_TAB[indexA];\n            if (Alpha > 0 && Beta > 0)\n            {\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, 12);\n                EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, 12);\n#else\n                EdgeLoop_Chroma_horizontal(SrcU, Strength, Alpha, Beta, clipTable, pitch >> 1);\n                EdgeLoop_Chroma_horizontal(SrcV, Strength, Alpha, Beta, clipTable, pitch >> 1);\n#endif\n            }\n        }\n\n    } /* end of: if(filterTopMbEdgeFlag) */\n\n\n    /* 4.HORIZONTAL EDGE (no boundary), the edges are inside a MB */\n    MbP = MbQ;\n\n    /* Recover Alpha,  Beta and clipTable for edge!=0 with the variables filterLeftMbEdgeFlag, mbNum and tmp */\n    /* Note that Alpha_c, Beta_c and clipTable_c for chroma is already calculated */\n    Alpha = filterLeftMbEdgeFlag;\n    Beta = mbNum;\n    clipTable = (int *)tmp;\n\n    GetStrength_HorizontalEdges(Strength + 4, MbQ); // Strength for 4 blks in 1 stripe, 0 => vertical edge\n\n    for (edge = 1; edge < 4; edge++)  // 4 horicontal strips of 16 pel\n    {\n        //GetStrength(video, Strength, MbP, MbQ, 1, edge); // Strength for 4 blks in 1 stripe   1 => horizontal edge\n        if (*((int*)(Strength + (edge << 2)))) // only if one of the 4 Strength bytes is 
!= 0\n        {\n            if (Alpha > 0 && Beta > 0)\n            {\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*20, Strength + (edge << 2),  Alpha, Beta, clipTable, 20);\n#else\n                EdgeLoop_Luma_horizontal(SrcY + (edge << 2)*pitch, Strength + (edge << 2),  Alpha, Beta, clipTable, pitch);\n#endif\n            }\n\n            if (!(edge & 1) && Alpha_c > 0 && Beta_c > 0)\n            {\n#ifdef USE_PRED_BLOCK\n                EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);\n                EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*12, Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, 12);\n#else\n                EdgeLoop_Chroma_horizontal(SrcU + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);\n                EdgeLoop_Chroma_horizontal(SrcV + (edge << 1)*(pitch >> 1), Strength + (edge << 2), Alpha_c, Beta_c, clipTable_c, pitch >> 1);\n#endif\n            }\n        }\n\n    } //end edge\n\n    return;\n}\n\n/*\n *****************************************************************************************************\n * \\brief   returns a buffer of 4 Strength values for one stripe in a mb (for different Frame types)\n *****************************************************************************************************\n*/\n\nvoid GetStrength_Edge0(uint8 *Strength, AVCMacroblock* MbP, AVCMacroblock* MbQ, int dir)\n{\n    int tmp;\n    int16 *ptrQ, *ptrP;\n    void* vptr;\n    uint8 *pStrength;\n    void* refIdx;\n\n    if (MbP->mbMode == AVC_I4 || MbP->mbMode == AVC_I16 ||\n            MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)\n    {\n\n        *((int*)Strength) = ININT_STRENGTH[0];      // Start with Strength=3. or Strength=4 for Mb-edge\n\n    }\n    else // if not intra or SP-frame\n    {\n        *((int*)Strength) = 0;\n\n        if (dir == 0)  // Vertical Edge 0\n        {\n\n            //1. 
Check the ref_frame_id\n            refIdx = (void*) MbQ->RefIdx; //de-ref type-punned pointer fix\n            ptrQ = (int16*)refIdx;\n            refIdx = (void*)MbP->RefIdx; //de-ref type-punned pointer fix\n            ptrP = (int16*)refIdx;\n            pStrength = Strength;\n            if (ptrQ[0] != ptrP[1]) pStrength[0] = 1;\n            if (ptrQ[2] != ptrP[3]) pStrength[2] = 1;\n            pStrength[1] = pStrength[0];\n            pStrength[3] = pStrength[2];\n\n            //2. Check the non-zero coeff blocks (4x4)\n            if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[3] != 0) pStrength[0] = 2;\n            if (MbQ->nz_coeff[4] != 0 || MbP->nz_coeff[7] != 0) pStrength[1] = 2;\n            if (MbQ->nz_coeff[8] != 0 || MbP->nz_coeff[11] != 0) pStrength[2] = 2;\n            if (MbQ->nz_coeff[12] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;\n\n            //3. Only need to check the mv difference\n            vptr = (void*)MbQ->mvL0;  // for deref type-punned pointer\n            ptrQ = (int16*)vptr;\n            ptrP = (int16*)(MbP->mvL0 + 3); // points to 4x4 block #3 (the 4th column)\n\n            // 1st blk\n            if (*pStrength == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pStrength++;\n            ptrQ += 8;\n            ptrP += 8;\n\n            // 2nd blk\n            if (*pStrength == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            
}\n\n            pStrength++;\n            ptrQ += 8;\n            ptrP += 8;\n\n            // 3rd blk\n            if (*pStrength == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pStrength++;\n            ptrQ += 8;\n            ptrP += 8;\n\n            // 4th blk\n            if (*pStrength == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n        }\n        else   // Horizontal Edge 0\n        {\n\n            //1. Check the ref_frame_id\n            refIdx = (void*)MbQ->RefIdx;  //de-ref type-punned pointer\n            ptrQ = (int16*)refIdx;\n            refIdx = (void*)MbP->RefIdx;  //de-ref type-punned pointer\n            ptrP = (int16*)refIdx;\n            pStrength = Strength;\n            if (ptrQ[0] != ptrP[2]) pStrength[0] = 1;\n            if (ptrQ[1] != ptrP[3]) pStrength[2] = 1;\n            pStrength[1] = pStrength[0];\n            pStrength[3] = pStrength[2];\n\n            //2. Check the non-zero coeff blocks (4x4)\n            if (MbQ->nz_coeff[0] != 0 || MbP->nz_coeff[12] != 0) pStrength[0] = 2;\n            if (MbQ->nz_coeff[1] != 0 || MbP->nz_coeff[13] != 0) pStrength[1] = 2;\n            if (MbQ->nz_coeff[2] != 0 || MbP->nz_coeff[14] != 0) pStrength[2] = 2;\n            if (MbQ->nz_coeff[3] != 0 || MbP->nz_coeff[15] != 0) pStrength[3] = 2;\n\n            //3. 
Only need to check the mv difference\n            vptr = (void*)MbQ->mvL0;\n            ptrQ = (int16*)vptr;\n            ptrP = (int16*)(MbP->mvL0 + 12); // points to 4x4 block #12 (the 4th row)\n\n            // 1st blk\n            if (*pStrength == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pStrength++;\n            ptrQ += 2;\n            ptrP += 2;\n\n            // 2nd blk\n            if (*pStrength  == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pStrength++;\n            ptrQ += 2;\n            ptrP += 2;\n\n            // 3rd blk\n            if (*pStrength  == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pStrength++;\n            ptrQ += 2;\n            ptrP += 2;\n\n            // 4th blk\n            if (*pStrength  == 0)\n            {\n                // check |mv difference| >= 4\n                tmp = *ptrQ++ - *ptrP++;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *ptrQ-- - *ptrP--;\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) 
*pStrength = 1;\n            }\n\n        } /* end of: else if(dir == 0) */\n\n    } /* end of: if( !(MbP->mbMode == AVC_I4 ...) */\n}\n\n\nvoid GetStrength_VerticalEdges(uint8 *Strength, AVCMacroblock* MbQ)\n{\n    int     idx, tmp;\n    int16   *ptr, *pmvx, *pmvy;\n    uint8   *pnz;\n    uint8   *pStrength, *pStr;\n    void* refIdx;\n\n    if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)\n    {\n        *((int*)Strength)     = ININT_STRENGTH[1];      // Start with Strength=3. or Strength=4 for Mb-edge\n        *((int*)(Strength + 4)) = ININT_STRENGTH[2];\n        *((int*)(Strength + 8)) = ININT_STRENGTH[3];\n    }\n    else   // Not intra or SP-frame\n    {\n\n        *((int*)Strength)     = 0; // for non-intra MB, strength = 0, 1 or 2.\n        *((int*)(Strength + 4)) = 0;\n        *((int*)(Strength + 8)) = 0;\n\n        //1. Check the ref_frame_id\n        refIdx = (void*)MbQ->RefIdx;  //de-ref type-punned pointer fix\n        ptr = (int16*)refIdx;\n        pStrength = Strength;\n        if (ptr[0] != ptr[1]) pStrength[4] = 1;\n        if (ptr[2] != ptr[3]) pStrength[6] = 1;\n        pStrength[5] = pStrength[4];\n        pStrength[7] = pStrength[6];\n\n        //2. 
Check the nz_coeff block and mv difference\n        pmvx = (int16*)(MbQ->mvL0 + 1); // points to 4x4 block #1,not #0\n        pmvy = pmvx + 1;\n        for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2\n        {\n            // first/third row : 1,2,3 or 9,10,12\n            // Strength = 2 for a whole row\n            pnz = MbQ->nz_coeff + (idx << 2);\n            if (*pnz++ != 0) *pStrength = 2;\n            if (*pnz++ != 0)\n            {\n                *pStrength = 2;\n                *(pStrength + 4) = 2;\n            }\n            if (*pnz++ != 0)\n            {\n                *(pStrength + 4) = 2;\n                *(pStrength + 8) = 2;\n            }\n            if (*pnz != 0) *(pStrength + 8) = 2;\n\n            // Then Strength = 1\n            if (*pStrength == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pmvx += 2;\n            pmvy += 2;\n            pStr = pStrength + 4;\n\n            if (*pStr == 0)\n            {\n                //check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 2;\n            pmvy += 2;\n            pStr = pStrength + 8;\n\n            if (*pStr == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n            
    tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            // Second/fourth row: 5,6,7 or 14,15,16\n            // Strength = 2 for a whole row\n            pnz = MbQ->nz_coeff + ((idx + 1) << 2);\n            if (*pnz++ != 0) *(pStrength + 1) = 2;\n            if (*pnz++ != 0)\n            {\n                *(pStrength + 1) = 2;\n                *(pStrength + 5) = 2;\n            }\n            if (*pnz++ != 0)\n            {\n                *(pStrength + 5) = 2;\n                *(pStrength + 9) = 2;\n            }\n            if (*pnz != 0) *(pStrength + 9) = 2;\n\n            // Then Strength = 1\n            pmvx += 4;\n            pmvy += 4;\n            pStr = pStrength + 1;\n            if (*pStr == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 2;\n            pmvy += 2;\n            pStr = pStrength + 5;\n\n            if (*pStr == 0)\n            {\n                //check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 2;\n            pmvy += 2;\n            pStr = pStrength + 9;\n\n            if (*pStr == 0)\n            {\n                //within the same 8x8 
block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 2);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            // update some variables for the next two rows\n            pmvx += 4;\n            pmvy += 4;\n            pStrength += 2;\n\n        } /* end of: for(idx=0; idx<2; idx++) */\n\n    } /* end of: else if( MbQ->mbMode == AVC_I4 ...) */\n}\n\n\nvoid GetStrength_HorizontalEdges(uint8 Strength[12], AVCMacroblock* MbQ)\n{\n    int     idx, tmp;\n    int16   *ptr, *pmvx, *pmvy;\n    uint8   *pStrength, *pStr;\n    void* refIdx;\n\n    if (MbQ->mbMode == AVC_I4 || MbQ->mbMode == AVC_I16)\n    {\n        *((int*)Strength)     = ININT_STRENGTH[1];      // Start with Strength=3. or Strength=4 for Mb-edge\n        *((int*)(Strength + 4)) = ININT_STRENGTH[2];\n        *((int*)(Strength + 8)) = ININT_STRENGTH[3];\n    }\n    else   // Not intra or SP-frame\n    {\n\n        *((int*)Strength)     = 0; // for non-intra MB, strength = 0, 1 or 2.\n        *((int*)(Strength + 4)) = 0; // for non-intra MB, strength = 0, 1 or 2.\n        *((int*)(Strength + 8)) = 0; // for non-intra MB, strength = 0, 1 or 2.\n\n\n        //1. Check the ref_frame_id\n        refIdx = (void*) MbQ->RefIdx; // de-ref type-punned fix\n        ptr = (int16*) refIdx;\n        pStrength = Strength;\n        if (ptr[0] != ptr[2]) pStrength[4] = 1;\n        if (ptr[1] != ptr[3]) pStrength[6] = 1;\n        pStrength[5] = pStrength[4];\n        pStrength[7] = pStrength[6];\n\n        //2. 
Check the nz_coeff block and mv difference\n        pmvx = (int16*)(MbQ->mvL0 + 4); // points to 4x4 block #4,not #0\n        pmvy = pmvx + 1;\n        for (idx = 0; idx < 4; idx += 2) // unroll the loop, make 4 iterations to 2\n        {\n            // first/third row : 1,2,3 or 9,10,12\n            // Strength = 2 for a whole row\n            if (MbQ->nz_coeff[idx] != 0) *pStrength = 2;\n            if (MbQ->nz_coeff[4+idx] != 0)\n            {\n                *pStrength = 2;\n                *(pStrength + 4) = 2;\n            }\n            if (MbQ->nz_coeff[8+idx] != 0)\n            {\n                *(pStrength + 4) = 2;\n                *(pStrength + 8) = 2;\n            }\n            if (MbQ->nz_coeff[12+idx] != 0) *(pStrength + 8) = 2;\n\n            // Then Strength = 1\n            if (*pStrength == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStrength = 1;\n            }\n\n            pmvx += 8;\n            pmvy += 8;\n            pStr = pStrength + 4;\n\n            if (*pStr == 0)\n            {\n                //check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 8;\n            pmvy += 8;\n            pStr = pStrength + 8;\n\n            if (*pStr == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n  
              tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            // Second/fourth row: 5,6,7 or 14,15,16\n            // Strength = 2 for a whole row\n            if (MbQ->nz_coeff[idx+1] != 0) *(pStrength + 1) = 2;\n            if (MbQ->nz_coeff[4+idx+1] != 0)\n            {\n                *(pStrength + 1) = 2;\n                *(pStrength + 5) = 2;\n            }\n            if (MbQ->nz_coeff[8+idx+1] != 0)\n            {\n                *(pStrength + 5) = 2;\n                *(pStrength + 9) = 2;\n            }\n            if (MbQ->nz_coeff[12+idx+1] != 0) *(pStrength + 9) = 2;\n\n            // Then Strength = 1\n            pmvx -= 14;\n            pmvy -= 14; // -14 = -16 + 2\n            pStr = pStrength + 1;\n            if (*pStr == 0)\n            {\n                //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 8;\n            pmvy += 8;\n            pStr = pStrength + 5;\n\n            if (*pStr == 0)\n            {\n                //check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            pmvx += 8;\n            pmvy += 8;\n            pStr = pStrength + 9;\n\n            if (*pStr == 0)\n            {\n   
             //within the same 8x8 block, no need to check the reference id\n                //only need to check the |mv difference| >= 4\n                tmp = *pmvx - *(pmvx - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n\n                tmp = *pmvy - *(pmvy - 8);\n                if (tmp < 0) tmp = -tmp;\n                if (tmp >= 4) *pStr = 1;\n            }\n\n            // update some variables for the next two rows\n            pmvx -= 14;\n            pmvy -= 14; // -14 = -16 + 2\n            pStrength += 2;\n\n        } /* end of: for(idx=0; idx<2; idx++) */\n\n    } /* end of: else if( MbQ->mbMode == AVC_I4 ...) */\n}\n\n/*\n *****************************************************************************************\n * \\brief  Filters one edge of 16 (luma) or 8 (chroma) pel\n *****************************************************************************************\n*/\n\nvoid EdgeLoop_Luma_horizontal(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)\n{\n    int  pel, ap = 0, aq = 0, Strng;\n    int  C0, c0, dif, AbsDelta, tmp, tmp1;\n    int  L2 = 0, L1, L0, R0, R1, R2 = 0, RL0;\n\n\n    if (Strength[0] == 4)  /* INTRA strong filtering */\n    {\n        for (pel = 0; pel < 16; pel++)\n        {\n            R0  = SrcPtr[0];\n            R1  = SrcPtr[pitch];\n            L0  = SrcPtr[-pitch];\n            L1  = SrcPtr[-(pitch<<1)];\n\n            // |R0 - R1| < Beta\n            tmp1 = R0 - R1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp = (tmp1 - Beta);\n\n            //|L0 - L1| < Beta\n            tmp1 = L0 - L1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Beta);\n\n            //|R0 - L0| < Alpha\n            AbsDelta = R0 - L0;\n            if (AbsDelta < 0) AbsDelta = -AbsDelta;\n            tmp &= (AbsDelta - Alpha);\n\n            if (tmp < 0)\n            {\n                AbsDelta -= ((Alpha >> 2) + 2);\n                R2 = 
SrcPtr[pitch<<1]; //inc2\n                L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3\n\n                // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)\n                tmp = R0 - R2;\n                if (tmp < 0) tmp = -tmp;\n                aq = AbsDelta & (tmp - Beta);\n\n                // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)\n                tmp = L0 - L2;\n                if (tmp < 0) tmp = -tmp;\n                ap = AbsDelta & (tmp - Beta);\n\n                if (aq < 0)\n                {\n                    tmp = R1 + R0 + L0;\n                    SrcPtr[0] = (L1 + (tmp << 1) +  R2 + 4) >> 3;\n                    tmp += R2;\n                    SrcPtr[pitch]  = (tmp + 2) >> 2;\n                    SrcPtr[pitch<<1] = (((SrcPtr[(pitch+(pitch<<1))] + R2) << 1) + tmp + 4) >> 3;\n                }\n                else\n                    SrcPtr[0] = ((R1 << 1) + R0 + L1 + 2) >> 2;\n\n                if (ap < 0)\n                {\n                    tmp = L1 + R0 + L0;\n                    SrcPtr[-pitch]  = (R1 + (tmp << 1) +  L2 + 4) >> 3;\n                    tmp += L2;\n                    SrcPtr[-(pitch<<1)] = (tmp + 2) >> 2;\n                    SrcPtr[-(pitch+(pitch<<1))] = (((SrcPtr[-(pitch<<2)] + L2) << 1) + tmp + 4) >> 3;\n                }\n                else\n                    SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2;\n\n            } /* if(tmp < 0) */\n\n            SrcPtr ++; // Increment to next set of pixel\n\n        } /* end of: for(pel=0; pel<16; pel++) */\n\n    } /* if(Strength[0] == 4) */\n\n    else   /* Normal filtering */\n    {\n        for (pel = 0; pel < 16; pel++)\n        {\n            Strng = Strength[pel >> 2];\n            if (Strng)\n            {\n                R0  = SrcPtr[0];\n                R1  = SrcPtr[pitch];\n                L0  = SrcPtr[-pitch];\n                L1  = SrcPtr[-(pitch<<1)]; // inc2\n\n                //|R0 - L0| < Alpha\n                tmp1 = R0 - L0;\n                if 
(tmp1 < 0) tmp1 = -tmp1;\n                tmp = (tmp1 - Alpha);\n\n                // |R0 - R1| < Beta\n                tmp1 = R0 - R1;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                tmp &= (tmp1 - Beta);\n\n                //|L0 - L1| < Beta\n                tmp1 = L0 - L1;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                tmp &= (tmp1 - Beta);\n\n                if (tmp < 0)\n                {\n                    R2 = SrcPtr[pitch<<1]; //inc2\n                    L2 = SrcPtr[-(pitch+(pitch<<1))]; // -inc3\n\n                    // |R0 - R2| < Beta\n                    tmp = R0 - R2;\n                    if (tmp < 0) tmp = -tmp;\n                    aq = tmp - Beta;\n\n                    // |L0 - L2| < Beta\n                    tmp = L0 - L2;\n                    if (tmp < 0) tmp = -tmp;\n                    ap = tmp - Beta;\n\n\n                    c0 = C0 = clipTable[Strng];\n                    if (ap < 0) c0++;\n                    if (aq < 0) c0++;\n\n                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);\n                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;\n                    tmp = dif + c0;\n                    if ((uint)tmp > (uint)c0 << 1)\n                    {\n                        tmp = ~(tmp >> 31);\n                        dif = (tmp & (c0 << 1)) - c0;\n                    }\n\n                    //SrcPtr[0]    = (uint8)IClip(0, 255, R0 - dif);\n                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);\n                    RL0 = R0 + L0;\n                    R0 -= dif;\n                    L0 += dif;\n                    if ((uint)R0 > 255)\n                    {\n                        tmp = ~(R0 >> 31);\n                        R0 = tmp & 255;\n                    }\n                    if ((uint)L0 > 255)\n                    {\n                        tmp = ~(L0 >> 31);\n                        L0 = tmp & 255;\n                    }\n                    
SrcPtr[-pitch] = L0;\n                    SrcPtr[0] = R0;\n\n                    if (C0 != 0) /* Multiple zeros in the clip tables */\n                    {\n                        if (aq < 0)  // SrcPtr[inc]   += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);\n                        {\n                            R2 = (R2 + ((RL0 + 1) >> 1) - (R1 << 1)) >> 1;\n                            tmp = R2 + C0;\n                            if ((uint)tmp > (uint)C0 << 1)\n                            {\n                                tmp = ~(tmp >> 31);\n                                R2 = (tmp & (C0 << 1)) - C0;\n                            }\n                            SrcPtr[pitch] += R2;\n                        }\n\n                        if (ap < 0)  //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);\n                        {\n                            L2 = (L2 + ((RL0 + 1) >> 1) - (L1 << 1)) >> 1;\n                            tmp = L2 + C0;\n                            if ((uint)tmp > (uint)C0 << 1)\n                            {\n                                tmp = ~(tmp >> 31);\n                                L2 = (tmp & (C0 << 1)) - C0;\n                            }\n                            SrcPtr[-(pitch<<1)] += L2;\n                        }\n                    }\n\n                } /* if(tmp < 0) */\n\n            } /* end of:  if((Strng = Strength[pel >> 2])) */\n\n            SrcPtr ++; // Increment to next set of pixel\n\n        } /* for(pel=0; pel<16; pel++) */\n\n    } /* else if(Strength[0] == 4) */\n}\n\nvoid EdgeLoop_Luma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)\n{\n    int  pel, ap = 1, aq = 1;\n    int  C0, c0, dif, AbsDelta, Strng, tmp, tmp1;\n    int  L2 = 0, L1, L0, R0, R1, R2 = 0;\n    uint8 *ptr, *ptr1;\n    register uint R_in, L_in;\n    uint R_out, L_out;\n\n\n    if (Strength[0] == 4)  /* INTRA strong filtering */\n    {\n\n        for (pel = 0; 
pel < 16; pel++)\n        {\n\n            // Read 8 pels\n            R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}\n            L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}\n            R1   = (R_in >> 8) & 0xff;\n            R0   = R_in & 0xff;\n            L0   = L_in >> 24;\n            L1   = (L_in >> 16) & 0xff;\n\n            // |R0 - R1| < Beta\n            tmp1 = (R_in & 0xff) - R1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp = (tmp1 - Beta);\n\n\n            //|L0 - L1| < Beta\n            tmp1 = (L_in >> 24) - L1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Beta);\n\n            //|R0 - L0| < Alpha\n            AbsDelta = (R_in & 0xff) - (L_in >> 24);\n            if (AbsDelta < 0) AbsDelta = -AbsDelta;\n            tmp &= (AbsDelta - Alpha);\n\n            if (tmp < 0)\n            {\n                AbsDelta -= ((Alpha >> 2) + 2);\n                R2   = (R_in >> 16) & 0xff;\n                L2   = (L_in >> 8) & 0xff;\n\n                // |R0 - R2| < Beta && |R0 - L0| < (Alpha/4 + 2)\n                tmp1 = (R_in & 0xff) - R2;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                aq = AbsDelta & (tmp1 - Beta);\n\n                // |L0 - L2| < Beta && |R0 - L0| < (Alpha/4 + 2)\n                tmp1 = (L_in >> 24) - L2;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                ap = AbsDelta & (tmp1 - Beta);\n\n\n                ptr = SrcPtr;\n                if (aq < 0)\n                {\n                    R_out = (R_in >> 24) << 24; // Keep R3 at the fourth byte\n\n                    tmp  = R0 + L0 + R1;\n                    R_out |= (((tmp << 1) +  L1 + R2 + 4) >> 3);\n                    tmp += R2;\n                    R_out |= (((tmp + 2) >> 2) << 8);\n                    tmp1 = ((R_in >> 24) + R2) << 1;\n                    R_out |= (((tmp1 + tmp + 4) >> 3) << 16);\n\n                    *((uint *)SrcPtr) = R_out;\n                }\n                else\n           
         *ptr = ((R1 << 1) + R0 + L1 + 2) >> 2;\n\n\n                if (ap < 0)\n                {\n                    L_out = (L_in << 24) >> 24; // Keep L3 at the first byte\n\n                    tmp  = R0 + L0 + L1;\n                    L_out |= ((((tmp << 1) + R1 + L2 + 4) >> 3) << 24);\n                    tmp += L2;\n                    L_out |= (((tmp + 2) >> 2) << 16);\n                    tmp1 = ((L_in & 0xff) + L2) << 1;\n                    L_out |= (((tmp1 + tmp + 4) >> 3) << 8);\n\n                    *((uint *)(SrcPtr - 4)) = L_out;\n                }\n                else\n                    *(--ptr) = ((L1 << 1) + L0 + R1 + 2) >> 2;\n\n            } /* if(tmp < 0) */\n\n            SrcPtr += pitch;    // Increment to next set of pixel\n\n        } /* end of: for(pel=0; pel<16; pel++) */\n\n    } /* if(Strength[0] == 4) */\n\n    else   /* Normal filtering */\n    {\n\n        for (pel = 0; pel < 16; pel++)\n        {\n            Strng = Strength[pel >> 2];\n            if (Strng)\n            {\n                // Read 8 pels\n                R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}\n                L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}\n                R1   = (R_in >> 8) & 0xff;\n                R0   = R_in & 0xff;\n                L0   = L_in >> 24;\n                L1   = (L_in >> 16) & 0xff;\n\n                //|R0 - L0| < Alpha\n                tmp = R0 - L0;\n                if (tmp < 0) tmp = -tmp;\n                tmp -= Alpha;\n\n                // |R0 - R1| < Beta\n                tmp1 = R0 - R1;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                tmp &= (tmp1 - Beta);\n\n                //|L0 - L1| < Beta\n                tmp1 = L0 - L1;\n                if (tmp1 < 0) tmp1 = -tmp1;\n                tmp &= (tmp1 - Beta);\n\n                if (tmp < 0)\n                {\n                    L2 = SrcPtr[-3];\n                    R2 = SrcPtr[2];\n\n                    // |R0 - R2| < 
Beta\n                    tmp = R0 - R2;\n                    if (tmp < 0) tmp = -tmp;\n                    aq = tmp - Beta;\n\n                    // |L0 - L2| < Beta\n                    tmp = L0 - L2;\n                    if (tmp < 0) tmp = -tmp;\n                    ap = tmp - Beta;\n\n\n                    c0 = C0 = clipTable[Strng];\n                    if (ap < 0) c0++;\n                    if (aq < 0) c0++;\n\n                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);\n                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;\n                    tmp = dif + c0;\n                    if ((uint)tmp > (uint)c0 << 1)\n                    {\n                        tmp = ~(tmp >> 31);\n                        dif = (tmp & (c0 << 1)) - c0;\n                    }\n\n                    ptr = SrcPtr;\n                    ptr1 = SrcPtr - 1;\n                    //SrcPtr[0]    = (uint8)IClip(0, 255, R0 - dif);\n                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);\n                    R_in = R0 - dif;\n                    L_in = L0 + dif; /* cannot re-use R0 and L0 here */\n                    if ((uint)R_in > 255)\n                    {\n                        tmp = ~((int)R_in >> 31);\n                        R_in = tmp & 255;\n                    }\n                    if ((uint)L_in > 255)\n                    {\n                        tmp = ~((int)L_in >> 31);\n                        L_in = tmp & 255;\n                    }\n                    *ptr1-- = L_in;\n                    *ptr++  = R_in;\n\n                    if (C0 != 0) // Multiple zeros in the clip tables\n                    {\n                        if (ap < 0)  //SrcPtr[-inc2] += IClip(-C0, C0,(L2 + ((RL0 + 1) >> 1) - (L1<<1)) >> 1);\n                        {\n                            L2 = (L2 + ((R0 + L0 + 1) >> 1) - (L1 << 1)) >> 1;\n                            tmp = L2 + C0;\n                            if ((uint)tmp > (uint)C0 << 1)\n      
                      {\n                                tmp = ~(tmp >> 31);\n                                L2 = (tmp & (C0 << 1)) - C0;\n                            }\n                            *ptr1 += L2;\n                        }\n\n                        if (aq < 0)  // SrcPtr[inc] += IClip(-C0, C0,(R2 + ((RL0 + 1) >> 1) - (R1<<1)) >> 1);\n                        {\n                            R2 = (R2 + ((R0 + L0 + 1) >> 1) - (R1 << 1)) >> 1;\n                            tmp = R2 + C0;\n                            if ((uint)tmp > (uint)C0 << 1)\n                            {\n                                tmp = ~(tmp >> 31);\n                                R2 = (tmp & (C0 << 1)) - C0;\n                            }\n                            *ptr += R2;\n                        }\n                    }\n\n                } /* if(tmp < 0) */\n\n            } /* end of:  if((Strng = Strength[pel >> 2])) */\n\n            SrcPtr += pitch;    // Increment to next set of pixel\n\n        } /* for(pel=0; pel<16; pel++) */\n\n    } /* else if(Strength[0] == 4) */\n\n}\n\nvoid EdgeLoop_Chroma_vertical(uint8* SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)\n{\n    int     pel, Strng;\n    int     c0, dif;\n    int     L1, L0, R0, R1, tmp, tmp1;\n    uint8   *ptr;\n    uint    R_in, L_in;\n\n\n    for (pel = 0; pel < 16; pel++)\n    {\n        Strng = Strength[pel>>2];\n        if (Strng)\n        {\n            // Read 8 pels\n            R_in = *((uint *)SrcPtr);       // R_in = {R3, R2, R1, R0}\n            L_in = *((uint *)(SrcPtr - 4)); // L_in = {L0, L1, L2, L3}\n            R1   = (R_in >> 8) & 0xff;\n            R0   = R_in & 0xff;\n            L0   = L_in >> 24;\n            L1   = (L_in >> 16) & 0xff;\n\n            // |R0 - R1| < Beta\n            tmp1 = R0 - R1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp = (tmp1 - Beta);\n\n            //|L0 - L1| < Beta\n            tmp1 = L0 - L1;\n            if (tmp1 < 
0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Beta);\n\n            //|R0 - L0| < Alpha\n            tmp1 = R0 - L0;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Alpha);\n\n            if (tmp < 0)\n            {\n                ptr = SrcPtr;\n                if (Strng == 4) /* INTRA strong filtering */\n                {\n                    *ptr-- = ((R1 << 1) + R0 + L1 + 2) >> 2;\n                    *ptr   = ((L1 << 1) + L0 + R1 + 2) >> 2;\n                }\n                else  /* normal filtering */\n                {\n                    c0  = clipTable[Strng] + 1;\n                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);\n                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;\n                    tmp = dif + c0;\n                    if ((uint)tmp > (uint)c0 << 1)\n                    {\n                        tmp = ~(tmp >> 31);\n                        dif = (tmp & (c0 << 1)) - c0;\n                    }\n\n                    //SrcPtr[0]    = (uint8)IClip(0, 255, R0 - dif);\n                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);\n                    L0 += dif;\n                    R0 -= dif;\n                    if ((uint)L0 > 255)\n                    {\n                        tmp = ~(L0 >> 31);\n                        L0 = tmp & 255;\n                    }\n                    if ((uint)R0 > 255)\n                    {\n                        tmp = ~(R0 >> 31);\n                        R0 = tmp & 255;\n                    }\n\n                    *ptr-- = R0;\n                    *ptr = L0;\n                }\n            }\n            pel ++;\n            SrcPtr += pitch;   // Increment to next set of pixel\n\n        } /* end of: if((Strng = Strength[pel >> 2])) */\n        else\n        {\n            pel += 3;\n            SrcPtr += (pitch << 1); //PtrInc << 1;\n        }\n\n    } /* end of: for(pel=0; pel<16; pel++) */\n}\n\n\nvoid EdgeLoop_Chroma_horizontal(uint8* 
SrcPtr, uint8 *Strength, int Alpha, int Beta, int *clipTable, int pitch)\n{\n    int  pel, Strng;\n    int  c0, dif;\n    int  L1, L0, R0, R1, tmp, tmp1;\n\n    for (pel = 0; pel < 16; pel++)\n    {\n        Strng = Strength[pel>>2];\n        if (Strng)\n        {\n            R0  = SrcPtr[0];\n            L0  = SrcPtr[-pitch];\n            L1  = SrcPtr[-(pitch<<1)]; //inc2\n            R1  = SrcPtr[pitch];\n\n            // |R0 - R1| < Beta\n            tmp1 = R0 - R1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp = (tmp1 - Beta);\n\n            //|L0 - L1| < Beta\n            tmp1 = L0 - L1;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Beta);\n\n            //|R0 - L0| < Alpha\n            tmp1 = R0 - L0;\n            if (tmp1 < 0) tmp1 = -tmp1;\n            tmp &= (tmp1 - Alpha);\n\n            if (tmp < 0)\n            {\n                if (Strng == 4) /* INTRA strong filtering */\n                {\n                    SrcPtr[0]      = ((R1 << 1) + R0 + L1 + 2) >> 2;\n                    SrcPtr[-pitch] = ((L1 << 1) + L0 + R1 + 2) >> 2;\n                }\n                else  /* normal filtering */\n                {\n                    c0  = clipTable[Strng] + 1;\n                    //dif = IClip(-c0, c0, ((Delta << 2) + (L1 - R1) + 4) >> 3);\n                    dif = (((R0 - L0) << 2) + (L1 - R1) + 4) >> 3;\n                    tmp = dif + c0;\n                    if ((uint)tmp > (uint)c0 << 1)\n                    {\n                        tmp = ~(tmp >> 31);\n                        dif = (tmp & (c0 << 1)) - c0;\n                    }\n\n                    //SrcPtr[-inc] = (uint8)IClip(0, 255, L0 + dif);\n                    //SrcPtr[0]    = (uint8)IClip(0, 255, R0 - dif);\n                    L0 += dif;\n                    R0 -= dif;\n                    if ((uint)L0 > 255)\n                    {\n                        tmp = ~(L0 >> 31);\n                        L0 = tmp & 255;\n                    }\n         
           if ((uint)R0 > 255)\n                    {\n                        tmp = ~(R0 >> 31);\n                        R0 = tmp & 255;\n                    }\n                    SrcPtr[0] = R0;\n                    SrcPtr[-pitch] = L0;\n                }\n            }\n\n            pel ++;\n            SrcPtr ++; // Increment to next set of pixel\n\n        } /* end of: if((Strng = Strength[pel >> 2])) */\n        else\n        {\n            pel += 3;\n            SrcPtr += 2;\n        }\n\n    } /* end of: for(pel=0; pel<16; pel++) */\n}\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/src/dpb.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n\n// xxx pa\n#define LOG_TAG \"dbp\"\n#include \"android/log.h\"\n\n\n#define DPB_MEM_ATTR 0\n\nAVCStatus InitDPB(AVCHandle *avcHandle, AVCCommonObj *video, int FrameHeightInMbs, int PicWidthInMbs, bool padding)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"InitDPB(int FrameHeightInMbs <%d>, int PicWidthInMbs <%d>, bool padding <%d>)\", FrameHeightInMbs, PicWidthInMbs, padding);\n\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int level, framesize, num_fs;\n    void *userData = avcHandle->userData;\n#ifndef PV_MEMORY_POOL\n    uint32 addr;\n#endif\n    uint16 refIdx = 0;\n    level = video->currSeqParams->level_idc;\n\n    for (num_fs = 0; num_fs < MAX_FS; num_fs++)\n    {\n        dpb->fs[num_fs] = NULL;\n    }\n\n    framesize = (int)(((FrameHeightInMbs * PicWidthInMbs) << 7) * 3);\n    if (padding)\n    {\n        video->padded_size = (int)((((FrameHeightInMbs + 2) * (PicWidthInMbs + 2)) << 7) * 3) - framesize;\n    }\n    else\n    {\n        video->padded_size = 0;\n    }\n\n#ifndef PV_MEMORY_POOL\n    if (dpb->decoded_picture_buffer)\n    {\n        avcHandle->CBAVC_Free(userData, (int)dpb->decoded_picture_buffer);\n        
dpb->decoded_picture_buffer = NULL;\n    }\n#endif\n    /* need to allocate one extra frame for current frame, DPB only defines for reference frames */\n\n    dpb->num_fs = (uint32)(MaxDPBX2[mapLev2Idx[level]] << 2) / (3 * FrameHeightInMbs * PicWidthInMbs) + 1;\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"InitDPB dpb->num_fs = %d\", dpb->num_fs);\n\n    if (dpb->num_fs > MAX_FS)\n    {\n        dpb->num_fs = MAX_FS;\n    }\n\n    if (video->currSeqParams->num_ref_frames + 1 > (uint32)dpb->num_fs)\n    {\n        dpb->num_fs = video->currSeqParams->num_ref_frames + 1;\n    }\n\n    dpb->dpb_size = dpb->num_fs * (framesize + video->padded_size);\n//  dpb->dpb_size = (uint32)MaxDPBX2[mapLev2Idx[level]]*512 + framesize;\n\n#ifndef PV_MEMORY_POOL\n    dpb->decoded_picture_buffer = (uint8*) avcHandle->CBAVC_Malloc(userData, dpb->dpb_size, 100/*DPB_MEM_ATTR*/);\n\n    if (dpb->decoded_picture_buffer == NULL || dpb->decoded_picture_buffer&0x3) // not word aligned\n        return AVC_MEMORY_FAIL;\n#endif\n    dpb->used_size = 0;\n    num_fs = 0;\n\n    while (num_fs < dpb->num_fs)\n    {\n        /*  fs is an array pointers to AVCDecPicture */\n        dpb->fs[num_fs] = (AVCFrameStore*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCFrameStore), 101/*DEFAULT_ATTR*/);\n        if (dpb->fs[num_fs] == NULL)\n        {\n            return AVC_MEMORY_FAIL;\n        }\n#ifndef PV_MEMORY_POOL\n        /* assign the actual memory for Sl, Scb, Scr */\n        dpb->fs[num_fs]->base_dpb = dpb->decoded_picture_buffer + dpb->used_size;\n#endif\n        dpb->fs[num_fs]->IsReference = 0;\n        dpb->fs[num_fs]->IsLongTerm = 0;\n        dpb->fs[num_fs]->IsOutputted = 3;\n        dpb->fs[num_fs]->frame.RefIdx = refIdx++; /* this value will remain unchanged through out the encoding session */\n        dpb->fs[num_fs]->frame.picType = AVC_FRAME;\n        dpb->fs[num_fs]->frame.isLongTerm = 0;\n        dpb->fs[num_fs]->frame.isReference = 0;\n        video->RefPicList0[num_fs] = 
&(dpb->fs[num_fs]->frame);\n        dpb->fs[num_fs]->frame.padded = 0;\n        dpb->used_size += (framesize + video->padded_size);\n        num_fs++;\n    }\n\n    return AVC_SUCCESS;\n}\n\nOSCL_EXPORT_REF AVCStatus AVCConfigureSequence(AVCHandle *avcHandle, AVCCommonObj *video, bool padding)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVCConfigureSequence\");\n\n    void *userData = avcHandle->userData;\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int framesize, ii; /* size of one frame */\n    uint PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs, PicSizeInMapUnits;\n    uint num_fs;\n    /* derived variables from SPS */\n    PicWidthInMbs = video->currSeqParams->pic_width_in_mbs_minus1 + 1;\n    PicHeightInMapUnits = video->currSeqParams->pic_height_in_map_units_minus1 + 1 ;\n    FrameHeightInMbs = (2 - video->currSeqParams->frame_mbs_only_flag) * PicHeightInMapUnits ;\n    PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ;\n\n    if (video->PicSizeInMapUnits != PicSizeInMapUnits || video->currSeqParams->level_idc != video->level_idc)\n    {\n        /* make sure you mark all the frames as unused for reference for flushing*/\n        for (ii = 0; ii < dpb->num_fs; ii++)\n        {\n            dpb->fs[ii]->IsReference = 0;\n            dpb->fs[ii]->IsOutputted |= 0x02;\n        }\n\n        num_fs = (uint32)(MaxDPBX2[(uint32)mapLev2Idx[video->currSeqParams->level_idc]] << 2) / (3 * PicSizeInMapUnits) + 1;\n        if (num_fs >= MAX_FS)\n        {\n            num_fs = MAX_FS;\n        }\n#ifdef PV_MEMORY_POOL\n        if (padding)\n        {\n            avcHandle->CBAVC_DPBAlloc(avcHandle->userData,\n                                      PicSizeInMapUnits + ((PicWidthInMbs + 2) << 1) + (PicHeightInMapUnits << 1), num_fs);\n        }\n        else\n        {\n            avcHandle->CBAVC_DPBAlloc(avcHandle->userData, PicSizeInMapUnits, num_fs);\n        }\n#endif\n\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  
\"AVCConfigureSequence CleanUpDPB & InitDPB\");\n\n        CleanUpDPB(avcHandle, video);\n        if (InitDPB(avcHandle, video, FrameHeightInMbs, PicWidthInMbs, padding) != AVC_SUCCESS)\n        {\n            return AVC_FAIL;\n        }\n        /*  Allocate video->mblock upto PicSizeInMbs and populate the structure  such as the neighboring MB pointers.   */\n        framesize = (FrameHeightInMbs * PicWidthInMbs);\n        if (video->mblock)\n        {\n            avcHandle->CBAVC_Free(userData, (uint32)video->mblock);\n            video->mblock = NULL;\n        }\n        video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);\n        if (video->mblock == NULL)\n        {\n            return AVC_FAIL;\n        }\n        for (ii = 0; ii < framesize; ii++)\n        {\n            video->mblock[ii].slice_id = -1;\n        }\n        /* Allocate memory for intra prediction */\n#ifdef MB_BASED_DEBLOCK\n        video->intra_pred_top = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 4, FAST_MEM_ATTR);\n        if (video->intra_pred_top == NULL)\n        {\n            return AVC_FAIL;\n        }\n        video->intra_pred_top_cb = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);\n        if (video->intra_pred_top_cb == NULL)\n        {\n            return AVC_FAIL;\n        }\n        video->intra_pred_top_cr = (uint8*) avcHandle->CBAVC_Malloc(userData, PicWidthInMbs << 3, FAST_MEM_ATTR);\n        if (video->intra_pred_top_cr == NULL)\n        {\n            return AVC_FAIL;\n        }\n\n#endif\n        /*  Allocate slice group MAP map */\n\n        if (video->MbToSliceGroupMap)\n        {\n            avcHandle->CBAVC_Free(userData, (uint32)video->MbToSliceGroupMap);\n            video->MbToSliceGroupMap = NULL;\n        }\n        video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits * 2, 7/*DEFAULT_ATTR*/);\n        
if (video->MbToSliceGroupMap == NULL)\n        {\n            return AVC_FAIL;\n        }\n        video->PicSizeInMapUnits = PicSizeInMapUnits;\n        video->level_idc = video->currSeqParams->level_idc;\n\n    }\n    return AVC_SUCCESS;\n}\n\nOSCL_EXPORT_REF AVCStatus CleanUpDPB(AVCHandle *avcHandle, AVCCommonObj *video)\n{\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int ii;\n    void *userData = avcHandle->userData;\n\n    for (ii = 0; ii < MAX_FS; ii++)\n    {\n        if (dpb->fs[ii] != NULL)\n        {\n            avcHandle->CBAVC_Free(userData, (int)dpb->fs[ii]);\n            dpb->fs[ii] = NULL;\n        }\n    }\n#ifndef PV_MEMORY_POOL\n    if (dpb->decoded_picture_buffer)\n    {\n        avcHandle->CBAVC_Free(userData, (int)dpb->decoded_picture_buffer);\n        dpb->decoded_picture_buffer = NULL;\n    }\n#endif\n    dpb->used_size = 0;\n    dpb->dpb_size = 0;\n\n    return AVC_SUCCESS;\n}\n\nOSCL_EXPORT_REF AVCStatus DPBInitBuffer(AVCHandle *avcHandle, AVCCommonObj *video)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DPBInitBuffer\");\n\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int ii, status;\n\n    /* Before doing any decoding, check if there's a frame memory available */\n    /* look for next unused dpb->fs, or complementary field pair */\n    /* video->currPic is assigned to this */\n\n    /* There's also restriction on the frame_num, see page 59 of JVT-I1010.doc. 
*/\n\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        /* looking for the one not used or not reference and has been outputted */\n        if (dpb->fs[ii]->IsReference == 0 && dpb->fs[ii]->IsOutputted == 3)\n        {\n            video->currFS = dpb->fs[ii];\n#ifdef PV_MEMORY_POOL\n            status = avcHandle->CBAVC_FrameBind(avcHandle->userData, ii, &(video->currFS->base_dpb));\n            if (status == AVC_FAIL)\n            {\n                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DPBInitBuffer CBAVC_FrameBind-> return AVC_NO_BUFFER for fs: %d\", ii);\n\n                return AVC_NO_BUFFER; /* this should not happen */\n            }\n#endif\n            break;\n        }\n    }\n    if (ii == dpb->num_fs)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DPBInitBuffer return AVC_PICTURE_OUTPUT_READY\");\n\n        return AVC_PICTURE_OUTPUT_READY; /* no empty frame available */\n    }\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DPBInitBuffer final return AVC_SUCCESS\");\n\n    return AVC_SUCCESS;\n}\n\nOSCL_EXPORT_REF void DPBInitPic(AVCCommonObj *video, int CurrPicNum)\n{\n    int offset = 0;\n    int offsetc = 0;\n    int luma_framesize;\n    /* this part has to be set here, assuming that slice header and POC have been decoded. 
*/\n    /* used in GetOutput API */\n    video->currFS->PicOrderCnt = video->PicOrderCnt;\n    video->currFS->FrameNum = video->sliceHdr->frame_num;\n    video->currFS->FrameNumWrap = CurrPicNum;    // MC_FIX\n    /* initialize everything to zero */\n    video->currFS->IsOutputted = 0;\n    video->currFS->IsReference = 0;\n    video->currFS->IsLongTerm = 0;\n    video->currFS->frame.isReference = FALSE;\n    video->currFS->frame.isLongTerm = FALSE;\n\n    /* initialize the pixel pointer to NULL */\n    video->currFS->frame.Sl = video->currFS->frame.Scb = video->currFS->frame.Scr = NULL;\n\n    /* determine video->currPic */\n    /* assign dbp->base_dpb to fs[i]->frame.Sl, Scb, Scr .*/\n    /* For PicSizeInMbs, see DecodeSliceHeader() */\n\n    video->currPic = &(video->currFS->frame);\n\n    video->currPic->padded = 0; // reset this flag to not-padded\n\n    if (video->padded_size)\n    {\n        offset = ((video->PicWidthInSamplesL + 32) << 4) + 16; // offset to the origin\n        offsetc = (offset >> 2) + 4;\n        luma_framesize = (int)((((video->FrameHeightInMbs + 2) * (video->PicWidthInMbs + 2)) << 8));\n    }\n    else\n        luma_framesize = video->PicSizeInMbs << 8;\n\n\n    video->currPic->Sl = video->currFS->base_dpb + offset;\n    video->currPic->Scb = video->currFS->base_dpb  + luma_framesize + offsetc;\n    video->currPic->Scr = video->currPic->Scb + (luma_framesize >> 2);\n    video->currPic->pitch = video->PicWidthInSamplesL + (video->padded_size == 0 ? 
0 : 32);\n\n\n    video->currPic->height = video->PicHeightInSamplesL;\n    video->currPic->width = video->PicWidthInSamplesL;\n    video->currPic->PicNum = CurrPicNum;\n}\n\n/* to release skipped frame after encoding */\nOSCL_EXPORT_REF void DPBReleaseCurrentFrame(AVCHandle *avcHandle, AVCCommonObj *video)\n{\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int ii;\n\n    video->currFS->IsOutputted = 3; // return this buffer.\n\n#ifdef PV_MEMORY_POOL /* for non-memory pool, no need to do anything */\n\n    /* search for current frame index */\n    ii = dpb->num_fs;\n    while (ii--)\n    {\n        if (dpb->fs[ii] == video->currFS)\n        {\n            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);\n            break;\n        }\n    }\n#endif\n\n    return ;\n}\n\n/* see subclause 8.2.5.1 */\nOSCL_EXPORT_REF AVCStatus StorePictureInDPB(AVCHandle *avcHandle, AVCCommonObj *video)\n{\n    AVCStatus status;\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    int ii, num_ref;\n\n    /* number 1 of 8.2.5.1, we handle gaps in frame_num differently without using the memory */\n    /* to be done!!!! 
*/\n\n    /* number 3 of 8.2.5.1 */\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        for (ii = 0; ii < dpb->num_fs; ii++)\n        {\n            if (dpb->fs[ii] != video->currFS) /* not current frame */\n            {\n                dpb->fs[ii]->IsReference = 0; /* mark as unused for reference */\n                dpb->fs[ii]->IsLongTerm = 0;  /* but still used until output */\n                dpb->fs[ii]->IsOutputted |= 0x02;\n#ifdef PV_MEMORY_POOL\n                if (dpb->fs[ii]->IsOutputted == 3)\n                {\n                    avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);\n                }\n#endif\n            }\n        }\n\n        video->currPic->isReference = TRUE;\n        video->currFS->IsReference = 3;\n\n        if (sliceHdr->long_term_reference_flag == 0)\n        {\n            video->currPic->isLongTerm = FALSE;\n            video->currFS->IsLongTerm = 0;\n            video->MaxLongTermFrameIdx = -1;\n        }\n        else\n        {\n            video->currPic->isLongTerm = TRUE;\n            video->currFS->IsLongTerm = 3;\n            video->currFS->LongTermFrameIdx = 0;\n            video->MaxLongTermFrameIdx = 0;\n        }\n        if (sliceHdr->no_output_of_prior_pics_flag)\n        {\n            for (ii = 0; ii < dpb->num_fs; ii++)\n            {\n                if (dpb->fs[ii] != video->currFS) /* not current frame */\n                {\n                    dpb->fs[ii]->IsOutputted = 3;\n#ifdef PV_MEMORY_POOL\n                    avcHandle->CBAVC_FrameUnbind(avcHandle->userData, ii);\n#endif\n                }\n            }\n        }\n        video->mem_mgr_ctrl_eq_5 = TRUE;    /* flush reference frames MC_FIX */\n    }\n    else\n    {\n        if (video->currPic->isReference == TRUE)\n        {\n            if (sliceHdr->adaptive_ref_pic_marking_mode_flag == 0)\n            {\n                status = sliding_window_process(avcHandle, video, dpb); /* we may have to do this after 
adaptive_memory_marking */\n            }\n            else\n            {\n                status = adaptive_memory_marking(avcHandle, video, dpb, sliceHdr);\n            }\n            if (status != AVC_SUCCESS)\n            {\n                return status;\n            }\n        }\n    }\n    /* number 4 of 8.2.5.1 */\n    /* This basically says every frame must be at least used for short-term ref. */\n    /* Need to be revisited!!! */\n    /* look at insert_picture_in_dpb() */\n\n\n\n    if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currPic->isLongTerm == FALSE)\n    {\n        if (video->currPic->isReference)\n        {\n            video->currFS->IsReference = 3;\n        }\n        else\n        {\n            video->currFS->IsReference = 0;\n        }\n        video->currFS->IsLongTerm = 0;\n    }\n\n    /* check if number of reference frames doesn't exceed num_ref_frames */\n    num_ref = 0;\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii]->IsReference)\n        {\n            num_ref++;\n        }\n    }\n\n    if (num_ref > (int)video->currSeqParams->num_ref_frames)\n    {\n        return AVC_FAIL; /* out of range */\n    }\n\n    return AVC_SUCCESS;\n}\n\n\nAVCStatus sliding_window_process(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)\n{\n    int ii, numShortTerm, numLongTerm;\n    int32 MinFrameNumWrap;\n    int MinIdx;\n\n\n    numShortTerm = 0;\n    numLongTerm = 0;\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii] != video->currFS) /* do not count the current frame */\n        {\n            if (dpb->fs[ii]->IsLongTerm)\n            {\n                numLongTerm++;\n            }\n            else if (dpb->fs[ii]->IsReference)\n            {\n                numShortTerm++;\n            }\n        }\n    }\n\n    /* Remove this check to allow certain corrupted content to pass. 
Can re-enable it if\n       it turns out to cause undesirable effect.\n\n      if (numShortTerm <= 0)\n      {\n          return AVC_FAIL;\n      } */\n\n    while (numShortTerm + numLongTerm >= (int)video->currSeqParams->num_ref_frames)\n    {\n        /* get short-term ref frame with smallest PicOrderCnt */\n        /* this doesn't work for all I-slice clip since PicOrderCnt will not be initialized */\n\n        MinFrameNumWrap = 0x7FFFFFFF;\n        MinIdx = -1;\n        for (ii = 0; ii < dpb->num_fs; ii++)\n        {\n            if (dpb->fs[ii]->IsReference && !dpb->fs[ii]->IsLongTerm)\n            {\n                if (dpb->fs[ii]->FrameNumWrap < MinFrameNumWrap)\n                {\n                    MinFrameNumWrap = dpb->fs[ii]->FrameNumWrap;\n                    MinIdx = ii;\n                }\n            }\n        }\n        if (MinIdx < 0) /* something wrong, impossible */\n        {\n            return AVC_FAIL;\n        }\n\n        /* mark the frame with smallest PicOrderCnt to be unused for reference */\n        dpb->fs[MinIdx]->IsReference = 0;\n        dpb->fs[MinIdx]->IsLongTerm = 0;\n        dpb->fs[MinIdx]->frame.isReference = FALSE;\n        dpb->fs[MinIdx]->frame.isLongTerm = FALSE;\n        dpb->fs[MinIdx]->IsOutputted |= 0x02;\n#ifdef PV_MEMORY_POOL\n        if (dpb->fs[MinIdx]->IsOutputted == 3)\n        {\n            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);\n        }\n#endif\n        numShortTerm--;\n    }\n    return AVC_SUCCESS;\n}\n\n/* see subclause 8.2.5.4 */\nAVCStatus adaptive_memory_marking(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, AVCSliceHeader *sliceHdr)\n{\n    int ii;\n\n    ii = 0;\n    while (ii < MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[ii] != 0)\n    {\n        switch (sliceHdr->memory_management_control_operation[ii])\n        {\n            case 1:\n                MemMgrCtrlOp1(avcHandle, video, dpb, 
sliceHdr->difference_of_pic_nums_minus1[ii]);\n                //      update_ref_list(dpb);\n                break;\n            case 2:\n                MemMgrCtrlOp2(avcHandle, dpb, sliceHdr->long_term_pic_num[ii]);\n                break;\n            case 3:\n                MemMgrCtrlOp3(avcHandle, video, dpb, sliceHdr->difference_of_pic_nums_minus1[ii], sliceHdr->long_term_frame_idx[ii]);\n                break;\n            case 4:\n                MemMgrCtrlOp4(avcHandle, video, dpb, sliceHdr->max_long_term_frame_idx_plus1[ii]);\n                break;\n            case 5:\n                MemMgrCtrlOp5(avcHandle, video, dpb);\n                video->currFS->FrameNum = 0;    //\n                video->currFS->PicOrderCnt = 0;\n                break;\n            case 6:\n                MemMgrCtrlOp6(avcHandle, video, dpb, sliceHdr->long_term_frame_idx[ii]);\n                break;\n        }\n        ii++;\n    }\n\n    if (ii == MAX_DEC_REF_PIC_MARKING)\n    {\n        return AVC_FAIL; /* exceed the limit */\n    }\n\n    return AVC_SUCCESS;\n}\n\n\n/* see subclause 8.2.5.4.1, mark short-term picture as \"unused for reference\" */\nvoid MemMgrCtrlOp1(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, int difference_of_pic_nums_minus1)\n{\n    int picNumX, ii;\n\n    picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);\n\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii]->IsReference == 3 && dpb->fs[ii]->IsLongTerm == 0)\n        {\n            if (dpb->fs[ii]->frame.PicNum == picNumX)\n            {\n                unmark_for_reference(avcHandle, dpb, ii);\n                return ;\n            }\n        }\n    }\n\n    return ;\n}\n\n/* see subclause 8.2.5.4.2 mark long-term picture as \"unused for reference\" */\nvoid MemMgrCtrlOp2(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, int long_term_pic_num)\n{\n    int ii;\n\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii]->IsLongTerm 
== 3)\n        {\n            if (dpb->fs[ii]->frame.LongTermPicNum == long_term_pic_num)\n            {\n                unmark_for_reference(avcHandle, dpb, ii);\n            }\n        }\n    }\n}\n\n/* see subclause 8.2.5.4.3 assign LongTermFrameIdx to a short-term ref picture */\nvoid MemMgrCtrlOp3(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint difference_of_pic_nums_minus1,\n                   uint long_term_frame_idx)\n{\n    int picNumX, ii;\n\n    picNumX = video->CurrPicNum - (difference_of_pic_nums_minus1 + 1);\n\n    /* look for fs[i] with long_term_frame_idx */\n\n    unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);\n\n\n    /* now mark the picture with picNumX to long term frame idx */\n\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii]->IsReference == 3)\n        {\n            if ((dpb->fs[ii]->frame.isLongTerm == FALSE) && (dpb->fs[ii]->frame.PicNum == picNumX))\n            {\n                dpb->fs[ii]->LongTermFrameIdx = long_term_frame_idx;\n                dpb->fs[ii]->frame.LongTermPicNum = long_term_frame_idx;\n\n                dpb->fs[ii]->frame.isLongTerm = TRUE;\n\n                dpb->fs[ii]->IsLongTerm = 3;\n                return;\n            }\n        }\n    }\n\n}\n\n/* see subclause 8.2.5.4.4, MaxLongTermFrameIdx */\nvoid MemMgrCtrlOp4(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint max_long_term_frame_idx_plus1)\n{\n    int ii;\n\n    video->MaxLongTermFrameIdx = max_long_term_frame_idx_plus1 - 1;\n\n    /* then mark long term frame with exceeding LongTermFrameIdx to unused for reference. 
*/\n    for (ii = 0; ii < dpb->num_fs; ii++)\n    {\n        if (dpb->fs[ii]->IsLongTerm && dpb->fs[ii] != video->currFS)\n        {\n            if (dpb->fs[ii]->LongTermFrameIdx > video->MaxLongTermFrameIdx)\n            {\n                unmark_for_reference(avcHandle, dpb, ii);\n            }\n        }\n    }\n}\n\n/* see subclause 8.2.5.4.5 mark all reference picture as \"unused for reference\" and setting\nMaxLongTermFrameIdx to \"no long-term frame indices\" */\nvoid MemMgrCtrlOp5(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb)\n{\n    int ii;\n\n    video->MaxLongTermFrameIdx = -1;\n    for (ii = 0; ii < dpb->num_fs; ii++) /* including the current frame ??????*/\n    {\n        if (dpb->fs[ii] != video->currFS) // MC_FIX\n        {\n            unmark_for_reference(avcHandle, dpb, ii);\n        }\n    }\n\n    video->mem_mgr_ctrl_eq_5 = TRUE;\n}\n\n/* see subclause 8.2.5.4.6 assing long-term frame index to the current picture */\nvoid MemMgrCtrlOp6(AVCHandle *avcHandle, AVCCommonObj *video, AVCDecPicBuffer *dpb, uint long_term_frame_idx)\n{\n\n    unmark_long_term_frame_for_reference_by_frame_idx(avcHandle, dpb, long_term_frame_idx);\n    video->currFS->IsLongTerm = 3;\n    video->currFS->IsReference = 3;\n\n    video->currPic->isLongTerm = TRUE;\n    video->currPic->isReference = TRUE;\n    video->currFS->LongTermFrameIdx = long_term_frame_idx;\n}\n\n\nvoid unmark_for_reference(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint idx)\n{\n\n    AVCFrameStore *fs = dpb->fs[idx];\n    fs->frame.isReference = FALSE;\n    fs->frame.isLongTerm = FALSE;\n\n    fs->IsLongTerm = 0;\n    fs->IsReference = 0;\n    fs->IsOutputted |= 0x02;\n#ifdef PV_MEMORY_POOL\n    if (fs->IsOutputted == 3)\n    {\n        avcHandle->CBAVC_FrameUnbind(avcHandle->userData, idx);\n    }\n#endif\n    return ;\n}\n\nvoid unmark_long_term_frame_for_reference_by_frame_idx(AVCHandle *avcHandle, AVCDecPicBuffer *dpb, uint long_term_frame_idx)\n{\n    int ii;\n    for (ii = 
0; ii < dpb->num_fs; ii++)\n    {\n\n        if (dpb->fs[ii]->IsLongTerm && (dpb->fs[ii]->LongTermFrameIdx == (int)long_term_frame_idx))\n        {\n            unmark_for_reference(avcHandle, dpb, ii);\n        }\n\n    }\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/src/fmo.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n#include \"oscl_mem.h\"\n\n/* see subclause 8.2.2 Decoding process for macroblock to slice group map */\nOSCL_EXPORT_REF AVCStatus FMOInit(AVCCommonObj *video)\n{\n    AVCPicParamSet *currPPS = video->currPicParams;\n    int *MbToSliceGroupMap = video->MbToSliceGroupMap;\n    int PicSizeInMapUnits = video->PicSizeInMapUnits;\n    int PicWidthInMbs = video->PicWidthInMbs;\n\n    if (currPPS->num_slice_groups_minus1 == 0)\n    {\n        oscl_memset(video->MbToSliceGroupMap, 0, video->PicSizeInMapUnits*sizeof(uint));\n    }\n    else\n    {\n        switch (currPPS->slice_group_map_type)\n        {\n            case 0:\n                FmoGenerateType0MapUnitMap(MbToSliceGroupMap, currPPS->run_length_minus1, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);\n                break;\n            case 1:\n                FmoGenerateType1MapUnitMap(MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);\n                break;\n            case 2:\n                FmoGenerateType2MapUnitMap(currPPS, MbToSliceGroupMap, PicWidthInMbs, currPPS->num_slice_groups_minus1, PicSizeInMapUnits);\n                break;\n            case 3:\n        
        FmoGenerateType3MapUnitMap(video, currPPS, MbToSliceGroupMap, PicWidthInMbs);\n                break;\n            case 4:\n                FmoGenerateType4MapUnitMap(MbToSliceGroupMap, video->MapUnitsInSliceGroup0, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);\n                break;\n            case 5:\n                FmoGenerateType5MapUnitMap(MbToSliceGroupMap, video, currPPS->slice_group_change_direction_flag, PicSizeInMapUnits);\n                break;\n            case 6:\n                FmoGenerateType6MapUnitMap(MbToSliceGroupMap, (int*)currPPS->slice_group_id, PicSizeInMapUnits);\n                break;\n            default:\n                return AVC_FAIL; /* out of range, shouldn't come this far */\n        }\n    }\n\n    return AVC_SUCCESS;\n}\n\n/* see subclause 8.2.2.1 interleaved slice group map type*/\nvoid FmoGenerateType0MapUnitMap(int *mapUnitToSliceGroupMap, uint *run_length_minus1, uint num_slice_groups_minus1, uint PicSizeInMapUnits)\n{\n    uint iGroup, j;\n    uint i = 0;\n    do\n    {\n        for (iGroup = 0;\n                (iGroup <= num_slice_groups_minus1) && (i < PicSizeInMapUnits);\n                i += run_length_minus1[iGroup++] + 1)\n        {\n            for (j = 0; j <= run_length_minus1[ iGroup ] && i + j < PicSizeInMapUnits; j++)\n                mapUnitToSliceGroupMap[i+j] = iGroup;\n        }\n    }\n    while (i < PicSizeInMapUnits);\n}\n\n/* see subclause 8.2.2.2 dispersed slice group map type*/\nvoid FmoGenerateType1MapUnitMap(int *mapUnitToSliceGroupMap, int PicWidthInMbs, uint num_slice_groups_minus1, uint PicSizeInMapUnits)\n{\n    uint i;\n    for (i = 0; i < PicSizeInMapUnits; i++)\n    {\n        mapUnitToSliceGroupMap[i] = ((i % PicWidthInMbs) + (((i / PicWidthInMbs) * (num_slice_groups_minus1 + 1)) / 2))\n                                    % (num_slice_groups_minus1 + 1);\n    }\n}\n\n/* see subclause 8.2.2.3 foreground with left-over slice group map type */\nvoid 
FmoGenerateType2MapUnitMap(AVCPicParamSet *pps, int *mapUnitToSliceGroupMap, int PicWidthInMbs,\n                                uint num_slice_groups_minus1, uint PicSizeInMapUnits)\n{\n    int iGroup;\n    uint i, x, y;\n    uint yTopLeft, xTopLeft, yBottomRight, xBottomRight;\n\n    for (i = 0; i < PicSizeInMapUnits; i++)\n    {\n        mapUnitToSliceGroupMap[ i ] = num_slice_groups_minus1;\n    }\n\n    for (iGroup = num_slice_groups_minus1 - 1 ; iGroup >= 0; iGroup--)\n    {\n        yTopLeft = pps->top_left[ iGroup ] / PicWidthInMbs;\n        xTopLeft = pps->top_left[ iGroup ] % PicWidthInMbs;\n        yBottomRight = pps->bottom_right[ iGroup ] / PicWidthInMbs;\n        xBottomRight = pps->bottom_right[ iGroup ] % PicWidthInMbs;\n        for (y = yTopLeft; y <= yBottomRight; y++)\n        {\n            for (x = xTopLeft; x <= xBottomRight; x++)\n            {\n                mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = iGroup;\n            }\n        }\n    }\n}\n\n\n/* see subclause 8.2.2.4 box-out slice group map type */\n/* follow the text rather than the JM, it's quite different. 
*/\nvoid FmoGenerateType3MapUnitMap(AVCCommonObj *video, AVCPicParamSet* pps, int *mapUnitToSliceGroupMap,\n                                int PicWidthInMbs)\n{\n    uint i, k;\n    int leftBound, topBound, rightBound, bottomBound;\n    int x, y, xDir, yDir;\n    int mapUnitVacant;\n    uint PicSizeInMapUnits = video->PicSizeInMapUnits;\n    uint MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;\n\n    for (i = 0; i < PicSizeInMapUnits; i++)\n    {\n        mapUnitToSliceGroupMap[ i ] = 1;\n    }\n\n    x = (PicWidthInMbs - pps->slice_group_change_direction_flag) / 2;\n    y = (video->PicHeightInMapUnits - pps->slice_group_change_direction_flag) / 2;\n\n    leftBound   = x;\n    topBound    = y;\n    rightBound  = x;\n    bottomBound = y;\n\n    xDir =  pps->slice_group_change_direction_flag - 1;\n    yDir =  pps->slice_group_change_direction_flag;\n\n    for (k = 0; k < MapUnitsInSliceGroup0; k += mapUnitVacant)\n    {\n        mapUnitVacant = (mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ]  ==  1);\n        if (mapUnitVacant)\n        {\n            mapUnitToSliceGroupMap[ y * PicWidthInMbs + x ] = 0;\n        }\n\n        if (xDir  ==  -1  &&  x  ==  leftBound)\n        {\n            leftBound = AVC_MAX(leftBound - 1, 0);\n            x = leftBound;\n            xDir = 0;\n            yDir = 2 * pps->slice_group_change_direction_flag - 1;\n        }\n        else if (xDir  ==  1  &&  x  ==  rightBound)\n        {\n            rightBound = AVC_MIN(rightBound + 1, (int)PicWidthInMbs - 1);\n            x = rightBound;\n            xDir = 0;\n            yDir = 1 - 2 * pps->slice_group_change_direction_flag;\n        }\n        else if (yDir  ==  -1  &&  y  ==  topBound)\n        {\n            topBound = AVC_MAX(topBound - 1, 0);\n            y = topBound;\n            xDir = 1 - 2 * pps->slice_group_change_direction_flag;\n            yDir = 0;\n        }\n        else  if (yDir  ==  1  &&  y  ==  bottomBound)\n        {\n            bottomBound = 
AVC_MIN(bottomBound + 1, (int)video->PicHeightInMapUnits - 1);\n            y = bottomBound;\n            xDir = 2 * pps->slice_group_change_direction_flag - 1;\n            yDir = 0;\n        }\n        else\n        {\n            x = x + xDir;\n            y = y + yDir;\n        }\n    }\n}\n\n/* see subclause 8.2.2.5 raster scan slice group map types */\nvoid FmoGenerateType4MapUnitMap(int *mapUnitToSliceGroupMap, int MapUnitsInSliceGroup0, int slice_group_change_direction_flag, uint PicSizeInMapUnits)\n{\n    uint sizeOfUpperLeftGroup = slice_group_change_direction_flag ? (PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;\n\n    uint i;\n\n    for (i = 0; i < PicSizeInMapUnits; i++)\n        if (i < sizeOfUpperLeftGroup)\n            mapUnitToSliceGroupMap[ i ] = 1 - slice_group_change_direction_flag;\n        else\n            mapUnitToSliceGroupMap[ i ] = slice_group_change_direction_flag;\n\n}\n\n/* see subclause 8.2.2.6, wipe slice group map type. */\nvoid FmoGenerateType5MapUnitMap(int *mapUnitToSliceGroupMap, AVCCommonObj *video,\n                                int slice_group_change_direction_flag, uint PicSizeInMapUnits)\n{\n    int PicWidthInMbs = video->PicWidthInMbs;\n    int PicHeightInMapUnits = video->PicHeightInMapUnits;\n    int MapUnitsInSliceGroup0 = video->MapUnitsInSliceGroup0;\n    int sizeOfUpperLeftGroup = slice_group_change_direction_flag ? 
(PicSizeInMapUnits - MapUnitsInSliceGroup0) : MapUnitsInSliceGroup0;\n    int i, j, k = 0;\n\n    for (j = 0; j < PicWidthInMbs; j++)\n    {\n        for (i = 0; i < PicHeightInMapUnits; i++)\n        {\n            if (k++ < sizeOfUpperLeftGroup)\n            {\n                mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = 1 - slice_group_change_direction_flag;\n            }\n            else\n            {\n                mapUnitToSliceGroupMap[ i * PicWidthInMbs + j ] = slice_group_change_direction_flag;\n            }\n        }\n    }\n}\n\n/* see subclause 8.2.2.7, explicit slice group map */\nvoid FmoGenerateType6MapUnitMap(int *mapUnitToSliceGroupMap, int *slice_group_id, uint PicSizeInMapUnits)\n{\n    uint i;\n    for (i = 0; i < PicSizeInMapUnits; i++)\n    {\n        mapUnitToSliceGroupMap[i] = slice_group_id[i];\n    }\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/src/mb_access.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n#include \"oscl_mem.h\"\n\nOSCL_EXPORT_REF void InitNeighborAvailability(AVCCommonObj *video, int mbNum)\n{\n    int PicWidthInMbs = video->PicWidthInMbs;\n\n    // do frame-only and postpone intraAvail calculattion\n    video->mbAddrA = mbNum - 1;\n    video->mbAddrB = mbNum - PicWidthInMbs;\n    video->mbAddrC = mbNum - PicWidthInMbs + 1;\n    video->mbAddrD = mbNum - PicWidthInMbs - 1;\n\n    video->mbAvailA = video->mbAvailB = video->mbAvailC = video->mbAvailD = 0;\n    if (video->mb_x)\n    {\n        video->mbAvailA = (video->mblock[video->mbAddrA].slice_id == video->currMB->slice_id);\n        if (video->mb_y)\n        {\n            video->mbAvailD = (video->mblock[video->mbAddrD].slice_id == video->currMB->slice_id);\n        }\n    }\n\n    if (video->mb_y)\n    {\n        video->mbAvailB = (video->mblock[video->mbAddrB].slice_id == video->currMB->slice_id);\n        if (video->mb_x < (PicWidthInMbs - 1))\n        {\n            video->mbAvailC = (video->mblock[video->mbAddrC].slice_id == video->currMB->slice_id);\n        }\n    }\n    return ;\n}\n\nbool mb_is_available(AVCMacroblock *mblock, uint PicSizeInMbs, int mbAddr, int currMbAddr)\n{\n    if 
(mbAddr < 0 || mbAddr >= (int)PicSizeInMbs)\n    {\n        return FALSE;\n    }\n\n    if (mblock[mbAddr].slice_id != mblock[currMbAddr].slice_id)\n    {\n        return FALSE;\n    }\n\n    return TRUE;\n}\n\nOSCL_EXPORT_REF int predict_nnz(AVCCommonObj *video, int i, int j)\n{\n    int pred_nnz = 0;\n    int cnt      = 1;\n    AVCMacroblock *tempMB;\n\n    /* left block */\n    /*getLuma4x4Neighbour(video, mb_nr, i, j, -1, 0, &pix);\n    leftMB = video->mblock + pix.mb_addr; */\n    /* replace the above with below (won't work for field decoding),  1/19/04 */\n\n    if (i)\n    {\n        pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];\n    }\n    else\n    {\n        if (video->mbAvailA)\n        {\n            tempMB = video->mblock + video->mbAddrA;\n            pred_nnz = tempMB->nz_coeff[(j<<2)+3];\n        }\n        else\n        {\n            cnt = 0;\n        }\n    }\n\n\n    /* top block */\n    /*getLuma4x4Neighbour(video, mb_nr, i, j, 0, -1, &pix);\n    topMB = video->mblock + pix.mb_addr;*/\n    /* replace the above with below (won't work for field decoding),  1/19/04 */\n\n    if (j)\n    {\n        pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];\n        cnt++;\n    }\n    else\n    {\n        if (video->mbAvailB)\n        {\n            tempMB = video->mblock + video->mbAddrB;\n            pred_nnz += tempMB->nz_coeff[12+i];\n            cnt++;\n        }\n    }\n\n\n    if (cnt == 2)\n    {\n        pred_nnz = (pred_nnz + 1) >> 1;\n    }\n\n    return pred_nnz;\n\n}\n\n\nOSCL_EXPORT_REF int predict_nnz_chroma(AVCCommonObj *video, int i, int j)\n{\n    int pred_nnz = 0;\n    int cnt      = 1;\n    AVCMacroblock *tempMB;\n\n    /* left block */\n    /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, -1, 0, &pix);\n    leftMB = video->mblock + pix.mb_addr;*/\n    /* replace the above with below (won't work for field decoding),  1/19/04 */\n    if (i&1)\n    {\n        pred_nnz = video->currMB->nz_coeff[(j<<2)+i-1];\n\n    }\n    else\n    {\n      
  if (video->mbAvailA)\n        {\n            tempMB = video->mblock + video->mbAddrA;\n            pred_nnz = tempMB->nz_coeff[(j<<2)+i+1];\n        }\n        else\n        {\n            cnt = 0;\n        }\n    }\n\n\n    /* top block */\n    /*getChroma4x4Neighbour(video, mb_nr, i%2, j-4, 0, -1, &pix);\n    topMB = video->mblock + pix.mb_addr;*/\n    /* replace the above with below (won't work for field decoding),  1/19/04 */\n\n    if (j&1)\n    {\n        pred_nnz += video->currMB->nz_coeff[((j-1)<<2)+i];\n        cnt++;\n    }\n    else\n    {\n        if (video->mbAvailB)\n        {\n            tempMB = video->mblock + video->mbAddrB;\n            pred_nnz += tempMB->nz_coeff[20+i];\n            cnt++;\n        }\n\n    }\n\n    if (cnt == 2)\n    {\n        pred_nnz = (pred_nnz + 1) >> 1;\n    }\n\n    return pred_nnz;\n}\n\nOSCL_EXPORT_REF void GetMotionVectorPredictor(AVCCommonObj *video, int encFlag)\n{\n    AVCMacroblock *currMB = video->currMB;\n    AVCMacroblock *MB_A, *MB_B, *MB_C, *MB_D;\n    int block_x, block_y, block_x_1, block_y_1, new_block_x;\n    int mbPartIdx, subMbPartIdx, offset_indx;\n    int16 *mv, pmv_x, pmv_y;\n    int nmSubMbHeight, nmSubMbWidth, mbPartIdx_X, mbPartIdx_Y;\n    int avail_a, avail_b, avail_c;\n    const static uint32 C = 0x5750;\n    int i, j, offset_MbPart_indx, refIdxLXA, refIdxLXB, refIdxLXC = 0, curr_ref_idx;\n    int pmv_A_x, pmv_B_x, pmv_C_x = 0, pmv_A_y, pmv_B_y, pmv_C_y = 0;\n\n    /* we have to take care of Intra/skip blocks somewhere, i.e. set MV to  0 and set ref to -1! 
*/\n    /* we have to populate refIdx as well */\n\n\n    MB_A = &video->mblock[video->mbAddrA];\n    MB_B = &video->mblock[video->mbAddrB];\n\n\n    if (currMB->mbMode == AVC_SKIP /* && !encFlag */) /* only for decoder */\n    {\n        currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] = currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = 0;\n        if (video->mbAvailA && video->mbAvailB)\n        {\n            if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||\n                    (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))\n            {\n                oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);\n                return;\n            }\n        }\n        else\n        {\n            oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);\n            return;\n        }\n        video->mvd_l0[0][0][0] = 0;\n        video->mvd_l0[0][0][1] = 0;\n    }\n\n    MB_C = &video->mblock[video->mbAddrC];\n    MB_D = &video->mblock[video->mbAddrD];\n\n    offset_MbPart_indx = 0;\n    for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n    {\n        offset_indx = 0;\n        nmSubMbHeight = currMB->SubMbPartHeight[mbPartIdx] >> 2;\n        nmSubMbWidth = currMB->SubMbPartWidth[mbPartIdx] >> 2;\n        mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1) << 1;\n        mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) & 2;\n\n        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n        {\n            block_x = mbPartIdx_X + ((subMbPartIdx + offset_indx) & 1);\n            block_y = mbPartIdx_Y + (((subMbPartIdx + offset_indx) >> 1) & 1);\n\n            block_x_1 = block_x - 1;\n            block_y_1 = block_y - 1;\n            refIdxLXA = refIdxLXB = refIdxLXC = -1;\n            pmv_A_x = pmv_A_y = pmv_B_x = pmv_B_y = pmv_C_x = pmv_C_y = 0;\n\n            if (block_x)\n            {\n                avail_a = 1;\n                refIdxLXA = currMB->ref_idx_L0[(block_y & 2) + (block_x_1 >> 1)];\n                mv = 
(int16*)(currMB->mvL0 + (block_y << 2) + block_x_1);\n                pmv_A_x = *mv++;\n                pmv_A_y = *mv;\n            }\n            else\n            {\n                avail_a = video->mbAvailA;\n                if (avail_a)\n                {\n                    refIdxLXA = MB_A->ref_idx_L0[(block_y & 2) + 1];\n                    mv = (int16*)(MB_A->mvL0 + (block_y << 2) + 3);\n                    pmv_A_x = *mv++;\n                    pmv_A_y = *mv;\n                }\n            }\n\n            if (block_y)\n            {\n                avail_b = 1;\n                refIdxLXB = currMB->ref_idx_L0[(block_y_1 & 2) + (block_x >> 1)];\n                mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x);\n                pmv_B_x = *mv++;\n                pmv_B_y = *mv;\n            }\n\n            else\n            {\n                avail_b = video->mbAvailB;\n                if (avail_b)\n                {\n                    refIdxLXB = MB_B->ref_idx_L0[2 + (block_x >> 1)];\n                    mv = (int16*)(MB_B->mvL0 + 12 + block_x);\n                    pmv_B_x = *mv++;\n                    pmv_B_y = *mv;\n                }\n            }\n\n            new_block_x = block_x + (currMB->SubMbPartWidth[mbPartIdx] >> 2) - 1;\n            avail_c = (C >> ((block_y << 2) + new_block_x)) & 0x1;\n\n            if (avail_c)\n            {\n                /* it guaranteed that block_y > 0 && new_block_x<3 ) */\n                refIdxLXC = currMB->ref_idx_L0[(block_y_1 & 2) + ((new_block_x+1) >> 1)];\n                mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + (new_block_x + 1));\n                pmv_C_x = *mv++;\n                pmv_C_y = *mv;\n            }\n            else\n            {\n                if (block_y == 0 && new_block_x < 3)\n                {\n                    avail_c = video->mbAvailB;\n                    if (avail_c)\n                    {\n                        refIdxLXC = MB_B->ref_idx_L0[2 + 
((new_block_x+1)>>1)];\n                        mv = (int16*)(MB_B->mvL0 + 12 + (new_block_x + 1));\n                        pmv_C_x = *mv++;\n                        pmv_C_y = *mv;\n                    }\n                }\n                else if (block_y == 0 && new_block_x == 3)\n                {\n                    avail_c = video->mbAvailC;\n                    if (avail_c)\n                    {\n                        refIdxLXC = MB_C->ref_idx_L0[2];\n                        mv = (int16*)(MB_C->mvL0 + 12);\n                        pmv_C_x = *mv++;\n                        pmv_C_y = *mv;\n                    }\n                }\n\n                if (avail_c == 0)\n                {   /* check D */\n                    if (block_x && block_y)\n                    {\n                        avail_c = 1;\n                        refIdxLXC =  currMB->ref_idx_L0[(block_y_1 & 2) + (block_x_1 >> 1)];\n                        mv = (int16*)(currMB->mvL0 + (block_y_1 << 2) + block_x_1);\n                        pmv_C_x = *mv++;\n                        pmv_C_y = *mv;\n                    }\n                    else if (block_y)\n                    {\n                        avail_c = video->mbAvailA;\n                        if (avail_c)\n                        {\n                            refIdxLXC =  MB_A->ref_idx_L0[(block_y_1 & 2) + 1];\n                            mv = (int16*)(MB_A->mvL0 + (block_y_1 << 2) + 3);\n                            pmv_C_x = *mv++;\n                            pmv_C_y = *mv;\n                        }\n                    }\n                    else if (block_x)\n                    {\n                        avail_c = video->mbAvailB;\n                        if (avail_c)\n                        {\n                            refIdxLXC = MB_B->ref_idx_L0[2 + (block_x_1 >> 1)];\n                            mv = (int16*)(MB_B->mvL0 + 12 + block_x_1);\n                            pmv_C_x = *mv++;\n                            
pmv_C_y = *mv;\n                        }\n                    }\n                    else\n                    {\n                        avail_c = video->mbAvailD;\n                        if (avail_c)\n                        {\n                            refIdxLXC = MB_D->ref_idx_L0[3];\n                            mv = (int16*)(MB_D->mvL0 + 15);\n                            pmv_C_x = *mv++;\n                            pmv_C_y = *mv;\n                        }\n                    }\n                }\n            }\n\n            offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;\n\n            curr_ref_idx = currMB->ref_idx_L0[(block_y & 2) + (block_x >> 1)];\n\n            if (avail_a && !(avail_b || avail_c))\n            {\n                pmv_x = pmv_A_x;\n                pmv_y = pmv_A_y;\n            }\n            else if (((curr_ref_idx == refIdxLXA) + (curr_ref_idx == refIdxLXB) + (curr_ref_idx == refIdxLXC)) == 1)\n            {\n                if (curr_ref_idx == refIdxLXA)\n                {\n                    pmv_x = pmv_A_x;\n                    pmv_y = pmv_A_y;\n                }\n                else if (curr_ref_idx == refIdxLXB)\n                {\n                    pmv_x = pmv_B_x;\n                    pmv_y = pmv_B_y;\n                }\n                else\n                {\n                    pmv_x = pmv_C_x;\n                    pmv_y = pmv_C_y;\n                }\n            }\n            else\n            {\n                pmv_x = AVC_MEDIAN(pmv_A_x, pmv_B_x, pmv_C_x);\n                pmv_y = AVC_MEDIAN(pmv_A_y, pmv_B_y, pmv_C_y);\n            }\n\n            /* overwrite if special case */\n            if (currMB->NumMbPart == 2)\n            {\n                if (currMB->MbPartWidth == 16)\n                {\n                    if (mbPartIdx == 0)\n                    {\n                        if (refIdxLXB == curr_ref_idx)\n                        {\n                            pmv_x = pmv_B_x;\n                 
           pmv_y = pmv_B_y;\n                        }\n                    }\n                    else if (refIdxLXA == curr_ref_idx)\n                    {\n                        pmv_x = pmv_A_x;\n                        pmv_y = pmv_A_y;\n                    }\n                }\n                else\n                {\n                    if (mbPartIdx == 0)\n                    {\n                        if (refIdxLXA == curr_ref_idx)\n                        {\n                            pmv_x = pmv_A_x;\n                            pmv_y = pmv_A_y;\n                        }\n                    }\n                    else if (refIdxLXC == curr_ref_idx)\n                    {\n                        pmv_x = pmv_C_x;\n                        pmv_y = pmv_C_y;\n                    }\n                }\n            }\n\n            mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));\n\n            if (encFlag) /* calculate residual MV video->mvd_l0 */\n            {\n                video->mvd_l0[mbPartIdx][subMbPartIdx][0] = *mv++ - pmv_x;\n                video->mvd_l0[mbPartIdx][subMbPartIdx][1] = *mv++ - pmv_y;\n            }\n            else    /* calculate original MV currMB->mvL0 */\n            {\n                pmv_x += video->mvd_l0[mbPartIdx][subMbPartIdx][0];\n                pmv_y += video->mvd_l0[mbPartIdx][subMbPartIdx][1];\n\n                for (i = 0; i < nmSubMbHeight; i++)\n                {\n                    for (j = 0; j < nmSubMbWidth; j++)\n                    {\n                        *mv++ = pmv_x;\n                        *mv++ = pmv_y;\n                    }\n                    mv += (8 - (j << 1));\n                }\n            }\n        }\n        offset_MbPart_indx = currMB->MbPartWidth >> 4;\n\n    }\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/common/src/reflist.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n\n// xxx pa\n#define LOG_TAG \"reflist\"\n#include \"android/log.h\"\n\n/** see subclause 8.2.4 Decoding process for reference picture lists construction. */\nOSCL_EXPORT_REF void RefListInit(AVCCommonObj *video)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"RefListInit\");\n\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int slice_type = video->slice_type;\n    int i, list0idx;\n\n    AVCPictureData *tmp_s;\n\n    list0idx = 0;\n\n    if (slice_type == AVC_I_SLICE)\n    {\n        video->refList0Size = 0;\n        video->refList1Size = 0;\n\n        /* we still have to calculate FrameNumWrap to make sure that all I-slice clip\n        can perform sliding_window_operation properly. */\n\n        for (i = 0; i < dpb->num_fs; i++)\n        {\n            if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))\n            {\n                /* subclause 8.2.4.1 Decoding process for picture numbers. 
*/\n                if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)\n                {\n                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;\n                }\n                else\n                {\n                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;\n                }\n                dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;\n            }\n        }\n\n\n        return ;\n    }\n    if (slice_type == AVC_P_SLICE)\n    {\n        /* Calculate FrameNumWrap and PicNum */\n\n        for (i = 0; i < dpb->num_fs; i++)\n        {\n            if ((dpb->fs[i]->IsReference == 3) && (!dpb->fs[i]->IsLongTerm))\n            {\n                /* subclause 8.2.4.1 Decoding process for picture numbers. */\n                if (dpb->fs[i]->FrameNum > (int)sliceHdr->frame_num)\n                {\n                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum - video->MaxFrameNum;\n                }\n                else\n                {\n                    dpb->fs[i]->FrameNumWrap = dpb->fs[i]->FrameNum;\n                }\n                dpb->fs[i]->frame.PicNum = dpb->fs[i]->FrameNumWrap;\n                video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);\n            }\n        }\n\n        if (list0idx == 0)\n        {\n            dpb->fs[0]->IsReference = 3;\n            video->RefPicList0[0] = &(dpb->fs[0]->frame);\n            list0idx = 1;\n        }\n        /* order list 0 by PicNum from max to min, see subclause 8.2.4.2.1 */\n        SortPicByPicNum(video->RefPicList0, list0idx);\n        video->refList0Size = list0idx;\n\n        /* long term handling */\n        for (i = 0; i < dpb->num_fs; i++)\n        {\n            if (dpb->fs[i]->IsLongTerm == 3)\n            {\n                /* subclause 8.2.4.1 Decoding process for picture numbers. 
*/\n                dpb->fs[i]->frame.LongTermPicNum = dpb->fs[i]->LongTermFrameIdx;\n                video->RefPicList0[list0idx++] = &(dpb->fs[i]->frame);\n            }\n        }\n\n        /* order PicNum from min to max, see subclause 8.2.4.2.1  */\n        SortPicByPicNumLongTerm(&(video->RefPicList0[video->refList0Size]), list0idx - video->refList0Size);\n        video->refList0Size = list0idx;\n\n\n        video->refList1Size = 0;\n    }\n\n\n    if ((video->refList0Size == video->refList1Size) && (video->refList0Size > 1))\n    {\n        /* check if lists are identical, if yes swap first two elements of listX[1] */\n        /* last paragraph of subclause 8.2.4.2.4 */\n\n        for (i = 0; i < video->refList0Size; i++)\n        {\n            if (video->RefPicList0[i] != video->RefPicList1[i])\n            {\n                break;\n            }\n        }\n        if (i == video->refList0Size)\n        {\n            tmp_s = video->RefPicList1[0];\n            video->RefPicList1[0] = video->RefPicList1[1];\n            video->RefPicList1[1] = tmp_s;\n        }\n    }\n\n    /* set max size */\n    video->refList0Size = AVC_MIN(video->refList0Size, (int)video->sliceHdr->num_ref_idx_l0_active_minus1 + 1);\n    video->refList1Size = AVC_MIN(video->refList1Size, (int)video->sliceHdr->num_ref_idx_l1_active_minus1 + 1);\n\n    return ;\n}\n/* see subclause 8.2.4.3 */\nOSCL_EXPORT_REF AVCStatus ReOrderList(AVCCommonObj *video)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"ReOrderList\");\n    \n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCStatus status = AVC_SUCCESS;\n    int slice_type = video->slice_type;\n\n    if (slice_type != AVC_I_SLICE)\n    {\n        if (sliceHdr->ref_pic_list_reordering_flag_l0)\n        {\n            status = ReorderRefPicList(video, 0);\n            if (status != AVC_SUCCESS)\n                return status;\n        }\n        if (video->refList0Size == 0)\n        {\n            return AVC_FAIL;\n        
}\n    }\n    return status;\n}\n\nAVCStatus ReorderRefPicList(AVCCommonObj *video, int isL1)\n{\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCStatus status;\n\n    int *list_size;\n    int num_ref_idx_lX_active_minus1;\n    uint *remapping_of_pic_nums_idc;\n    int *abs_diff_pic_num_minus1;\n    int *long_term_pic_idx;\n    int i;\n    int maxPicNum, currPicNum, picNumLXNoWrap, picNumLXPred, picNumLX;\n    int refIdxLX = 0;\n    void* tmp;\n\n    if (!isL1) /* list 0 */\n    {\n        list_size = &(video->refList0Size);\n        num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l0_active_minus1;\n        remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l0;\n        tmp = (void*)sliceHdr->abs_diff_pic_num_minus1_l0;\n        abs_diff_pic_num_minus1 = (int*) tmp;\n        tmp = (void*)sliceHdr->long_term_pic_num_l0;\n        long_term_pic_idx = (int*) tmp;\n    }\n    else\n    {\n        list_size = &(video->refList1Size);\n        num_ref_idx_lX_active_minus1 = sliceHdr->num_ref_idx_l1_active_minus1;\n        remapping_of_pic_nums_idc = sliceHdr->reordering_of_pic_nums_idc_l1;\n        tmp = (void*) sliceHdr->abs_diff_pic_num_minus1_l1;\n        abs_diff_pic_num_minus1 = (int*) tmp;\n        tmp = (void*) sliceHdr->long_term_pic_num_l1;\n        long_term_pic_idx = (int*)tmp;\n    }\n\n    maxPicNum = video->MaxPicNum;\n    currPicNum = video->CurrPicNum;\n\n    picNumLXPred = currPicNum; /* initial value */\n\n    for (i = 0; remapping_of_pic_nums_idc[i] != 3; i++)\n    {\n        if ((remapping_of_pic_nums_idc[i] > 3) || (i >= MAX_REF_PIC_LIST_REORDERING))\n        {\n            return AVC_FAIL; /* out of range */\n        }\n        /* see subclause 8.2.4.3.1 */\n        if (remapping_of_pic_nums_idc[i] < 2)\n        {\n            if (remapping_of_pic_nums_idc[i] == 0)\n            {\n                if (picNumLXPred - (abs_diff_pic_num_minus1[i] + 1) < 0)\n                    picNumLXNoWrap = picNumLXPred - 
(abs_diff_pic_num_minus1[i] + 1) + maxPicNum;\n                else\n                    picNumLXNoWrap = picNumLXPred - (abs_diff_pic_num_minus1[i] + 1);\n            }\n            else /* (remapping_of_pic_nums_idc[i] == 1) */\n            {\n                if (picNumLXPred + (abs_diff_pic_num_minus1[i] + 1)  >=  maxPicNum)\n                    picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1) - maxPicNum;\n                else\n                    picNumLXNoWrap = picNumLXPred + (abs_diff_pic_num_minus1[i] + 1);\n            }\n            picNumLXPred = picNumLXNoWrap; /* prediction for the next one */\n\n            if (picNumLXNoWrap > currPicNum)\n                picNumLX = picNumLXNoWrap - maxPicNum;\n            else\n                picNumLX = picNumLXNoWrap;\n\n            status = ReorderShortTerm(video, picNumLX, &refIdxLX, isL1);\n            if (status != AVC_SUCCESS)\n            {\n                return status;\n            }\n        }\n        else /* (remapping_of_pic_nums_idc[i] == 2), subclause 8.2.4.3.2 */\n        {\n            status = ReorderLongTerm(video, long_term_pic_idx[i], &refIdxLX, isL1);\n            if (status != AVC_SUCCESS)\n            {\n                return status;\n            }\n        }\n    }\n    /* that's a definition */\n    *list_size = num_ref_idx_lX_active_minus1 + 1;\n\n    return AVC_SUCCESS;\n}\n\n/* see subclause 8.2.4.3.1 */\nAVCStatus ReorderShortTerm(AVCCommonObj *video, int picNumLX, int *refIdxLX, int isL1)\n{\n    int cIdx, nIdx;\n    int num_ref_idx_lX_active_minus1;\n    AVCPictureData *picLX, **RefPicListX;\n\n    if (!isL1) /* list 0 */\n    {\n        RefPicListX = video->RefPicList0;\n        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;\n    }\n    else\n    {\n        RefPicListX = video->RefPicList1;\n        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;\n    }\n\n    picLX = GetShortTermPic(video, 
picNumLX);\n\n    if (picLX == NULL)\n    {\n        return AVC_FAIL;\n    }\n    /* Note RefPicListX has to access element number num_ref_idx_lX_active */\n    /* There could be access violation here. */\n    if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST)\n    {\n        return AVC_FAIL;\n    }\n\n    for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)\n    {\n        RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1];\n    }\n\n    RefPicListX[(*refIdxLX)++ ] = picLX;\n\n    nIdx = *refIdxLX;\n\n    for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++)\n    {\n        if (RefPicListX[ cIdx ])\n        {\n            if ((RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->PicNum != picNumLX))\n            {\n                RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ];\n            }\n        }\n    }\n    return AVC_SUCCESS;\n}\n\n/* see subclause 8.2.4.3.2 */\nAVCStatus ReorderLongTerm(AVCCommonObj *video, int LongTermPicNum, int *refIdxLX, int isL1)\n{\n    AVCPictureData **RefPicListX;\n    int num_ref_idx_lX_active_minus1;\n    int cIdx, nIdx;\n    AVCPictureData *picLX;\n\n    if (!isL1) /* list 0 */\n    {\n        RefPicListX = video->RefPicList0;\n        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l0_active_minus1;\n    }\n    else\n    {\n        RefPicListX = video->RefPicList1;\n        num_ref_idx_lX_active_minus1 = video->sliceHdr->num_ref_idx_l1_active_minus1;\n    }\n\n    picLX = GetLongTermPic(video, LongTermPicNum);\n    if (picLX == NULL)\n    {\n        return AVC_FAIL;\n    }\n    /* Note RefPicListX has to access element number num_ref_idx_lX_active */\n    /* There could be access violation here. 
*/\n    if (num_ref_idx_lX_active_minus1 + 1 >= MAX_REF_PIC_LIST)\n    {\n        return AVC_FAIL;\n    }\n    for (cIdx = num_ref_idx_lX_active_minus1 + 1; cIdx > *refIdxLX; cIdx--)\n        RefPicListX[ cIdx ] = RefPicListX[ cIdx - 1];\n\n    RefPicListX[(*refIdxLX)++ ] = picLX;\n\n    nIdx = *refIdxLX;\n\n    for (cIdx = *refIdxLX; cIdx <= num_ref_idx_lX_active_minus1 + 1; cIdx++)\n    {\n        if ((!RefPicListX[ cIdx ]->isLongTerm) || ((int)RefPicListX[ cIdx ]->LongTermPicNum != LongTermPicNum))\n        {\n            RefPicListX[ nIdx++ ] = RefPicListX[ cIdx ];\n        }\n    }\n    return AVC_SUCCESS;\n}\n\n\nAVCPictureData*  GetShortTermPic(AVCCommonObj *video, int picNum)\n{\n    int i;\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n\n    for (i = 0; i < dpb->num_fs; i++)\n    {\n\n        if (dpb->fs[i]->IsReference == 3)\n        {\n            if ((dpb->fs[i]->frame.isLongTerm == FALSE) && (dpb->fs[i]->frame.PicNum == picNum))\n            {\n                return &(dpb->fs[i]->frame);\n            }\n        }\n\n    }\n\n    return NULL;\n}\n\nAVCPictureData*  GetLongTermPic(AVCCommonObj *video, int LongtermPicNum)\n{\n    AVCDecPicBuffer *dpb = video->decPicBuf;\n    int i;\n\n    for (i = 0; i < dpb->num_fs; i++)\n    {\n\n        if (dpb->fs[i]->IsReference == 3)\n        {\n            if ((dpb->fs[i]->frame.isLongTerm == TRUE) && (dpb->fs[i]->frame.LongTermPicNum == LongtermPicNum))\n            {\n                return &(dpb->fs[i]->frame);\n            }\n        }\n\n    }\n    return NULL;\n}\n\nint is_short_ref(AVCPictureData *s)\n{\n    return ((s->isReference) && !(s->isLongTerm));\n}\n\nint is_long_ref(AVCPictureData *s)\n{\n    return ((s->isReference) && (s->isLongTerm));\n}\n\n\n/* sort by PicNum, descending order */\nvoid SortPicByPicNum(AVCPictureData *data[], int num)\n{\n    int i, j;\n    AVCPictureData *temp;\n\n    for (i = 0; i < num - 1; i++)\n    {\n        for (j = i + 1; j < num; j++)\n        {\n            if 
(data[j]->PicNum > data[i]->PicNum)\n            {\n                temp = data[j];\n                data[j] = data[i];\n                data[i] = temp;\n            }\n        }\n    }\n\n    return ;\n}\n\n/* sort by PicNum, ascending order */\nvoid SortPicByPicNumLongTerm(AVCPictureData *data[], int num)\n{\n    int i, j;\n    AVCPictureData *temp;\n\n    for (i = 0; i < num - 1; i++)\n    {\n        for (j = i + 1; j < num; j++)\n        {\n            if (data[j]->LongTermPicNum < data[i]->LongTermPicNum)\n            {\n                temp = data[j];\n                data[j] = data[i];\n                data[i] = temp;\n            }\n        }\n    }\n\n    return ;\n}\n\n\n/* sort by FrameNumWrap, descending order */\nvoid SortFrameByFrameNumWrap(AVCFrameStore *data[], int num)\n{\n    int i, j;\n    AVCFrameStore *temp;\n\n    for (i = 0; i < num - 1; i++)\n    {\n        for (j = i + 1; j < num; j++)\n        {\n            if (data[j]->FrameNumWrap > data[i]->FrameNumWrap)\n            {\n                temp = data[j];\n                data[j] = data[i];\n                data[i] = temp;\n            }\n        }\n    }\n\n    return ;\n}\n\n/* sort frames by LongTermFrameIdx, ascending order */\nvoid SortFrameByLTFrameIdx(AVCFrameStore *data[], int num)\n{\n    int i, j;\n    AVCFrameStore *temp;\n\n    for (i = 0; i < num - 1; i++)\n    {\n        for (j = i + 1; j < num; j++)\n        {\n            if (data[j]->LongTermFrameIdx < data[i]->LongTermFrameIdx)\n            {\n                temp = data[j];\n                data[j] = data[i];\n                data[i] = temp;\n            }\n        }\n    }\n\n    return ;\n}\n\n/* sort PictureData by POC in descending order */\nvoid SortPicByPOC(AVCPictureData *data[], int num, int descending)\n{\n    int i, j;\n    AVCPictureData *temp;\n\n    if (descending)\n    {\n        for (i = 0; i < num - 1; i++)\n        {\n            for (j = i + 1; j < num; j++)\n            {\n                if 
(data[j]->PicOrderCnt > data[i]->PicOrderCnt)\n                {\n                    temp = data[j];\n                    data[j] = data[i];\n                    data[i] = temp;\n                }\n            }\n        }\n    }\n    else\n    {\n        for (i = 0; i < num - 1; i++)\n        {\n            for (j = i + 1; j < num; j++)\n            {\n                if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)\n                {\n                    temp = data[j];\n                    data[j] = data[i];\n                    data[i] = temp;\n                }\n            }\n        }\n    }\n    return ;\n}\n\n/* sort PictureData by LongTermPicNum in ascending order */\nvoid SortPicByLTPicNum(AVCPictureData *data[], int num)\n{\n    int i, j;\n    AVCPictureData *temp;\n\n    for (i = 0; i < num - 1; i++)\n    {\n        for (j = i + 1; j < num; j++)\n        {\n            if (data[j]->LongTermPicNum < data[i]->LongTermPicNum)\n            {\n                temp = data[j];\n                data[j] = data[i];\n                data[i] = temp;\n            }\n        }\n    }\n\n    return ;\n}\n\n/* sort by PicOrderCnt, descending order */\nvoid SortFrameByPOC(AVCFrameStore *data[], int num, int descending)\n{\n    int i, j;\n    AVCFrameStore *temp;\n\n    if (descending)\n    {\n        for (i = 0; i < num - 1; i++)\n        {\n            for (j = i + 1; j < num; j++)\n            {\n                if (data[j]->PicOrderCnt > data[i]->PicOrderCnt)\n                {\n                    temp = data[j];\n                    data[j] = data[i];\n                    data[i] = temp;\n                }\n            }\n        }\n    }\n    else\n    {\n        for (i = 0; i < num - 1; i++)\n        {\n            for (j = i + 1; j < num; j++)\n            {\n                if (data[j]->PicOrderCnt < data[i]->PicOrderCnt)\n                {\n                    temp = data[j];\n                    data[j] = data[i];\n                    data[i] = temp;\n      
          }\n            }\n        }\n    }\n\n    return ;\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/Android.mk",
    "content": "#\n# Copyright (C) 2008 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This makefile supplies the rules for building a library of JNI code for\n# use by our example platform shared library.\n\nLOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_MODULE_TAGS := optional\n\n# This is the target being built.\nLOCAL_MODULE:= libH264Decoder\n\n# All of the source files that we will compile.\nLOCAL_SRC_FILES:= \\\n\tsrc/3GPVideoParser.cpp \\\n\tsrc/avc_bitstream.cpp \\\n\tsrc/avcdec_api.cpp \\\n\tsrc/header.cpp \\\n\tsrc/itrans.cpp \\\n\tsrc/pred_inter.cpp \\\n\tsrc/pred_intra.cpp \\\n\tsrc/residual.cpp \\\n\tsrc/slice.cpp \\\n\tsrc/vlc.cpp \\\n\tsrc/yuv2rgb.cpp \\\n\tsrc/pvavcdecoder.cpp \\\n\tsrc/NativeH264Decoder.cpp \\\n\t../common/src/deblock.cpp \\\n\t../common/src/dpb.cpp \\\n\t../common/src/fmo.cpp \\\n\t../common/src/mb_access.cpp \\\n\t../common/src/reflist.cpp\n\n# All of the shared libraries we link against.\nLOCAL_SHARED_LIBRARIES := \n\n# No static libraries.\nLOCAL_STATIC_LIBRARIES :=\n\n# Also need the JNI headers.\nLOCAL_C_INCLUDES += \\\n\t$(JNI_H_INCLUDE)\\\n\t$(LOCAL_PATH)/src \\\n \t$(LOCAL_PATH)/include \\\n\t$(AVC_ROOT)/oscl \\\n\t$(AVC_ROOT)/common/include\n\n# No specia compiler flags.\nLOCAL_CFLAGS +=\n\n# Link libs (ex logs)\nLOCAL_LDLIBS := -llog\n\n# Don't prelink this library.  
For more efficient code, you may want\n# to add this library to the prelink map and set this to true.\nLOCAL_PRELINK_MODULE := false\n\ninclude $(BUILD_SHARED_LIBRARY)\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/include/avcdec_api.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains application function interfaces to the AVC decoder library\nand necessary type defitionitions and enumerations.\n@publishedAll\n*/\n\n#ifndef _AVCDEC_API_H_\n#define _AVCDEC_API_H_\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#include \"avcapi_common.h\"\n\n/**\n This enumeration is used for the status returned from the library interface.\n*/\ntypedef enum\n{\n    /**\n    The followings are fail with details. Their values are negative.\n    */\n    AVCDEC_NO_DATA = -4,\n    AVCDEC_NOT_SUPPORTED = -3,\n    /**\n    Fail information\n    */\n    AVCDEC_NO_BUFFER = -2, /* no output picture buffer available */\n    AVCDEC_MEMORY_FAIL = -1, /* memory allocation failed */\n    AVCDEC_FAIL = 0,\n    /**\n    Generic success value\n    */\n    AVCDEC_SUCCESS = 1,\n    AVCDEC_PICTURE_OUTPUT_READY = 2,\n    AVCDEC_PICTURE_READY = 3,\n\n    /**\n    The followings are success with warnings. 
Their values are positive integers.\n    */\n    AVCDEC_NO_NEXT_SC = 4,\n    AVCDEC_REDUNDANT_FRAME = 5,\n    AVCDEC_CONCEALED_FRAME = 6  /* detect and conceal the error */\n} AVCDec_Status;\n\n\n/**\nThis structure contains sequence parameters information.\n*/\ntypedef struct tagAVCDecSPSInfo\n{\n    int FrameWidth;\n    int FrameHeight;\n    uint frame_only_flag;\n    int  frame_crop_left;\n    int  frame_crop_right;\n    int  frame_crop_top;\n    int  frame_crop_bottom;\n    int  num_frames; // minimal number of YUV frame buffers required\n\n} AVCDecSPSInfo;\n\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /** THE FOLLOWINGS ARE APIS */\n    /**\n    This function parses one NAL unit from byte stream format input according to Annex B.\n    \\param \"bitstream\"  \"Pointer to the bitstream buffer.\"\n    \\param \"nal_unit\"   \"Point to pointer and the location of the start of the first NAL unit\n                         found in bitstream.\"\n    \\param \"size\"       \"As input, the pointer to the size of bitstream in bytes. 
As output,\n                         the value is changed to be the size of the found NAL unit.\"\n    \\return \"AVCDEC_SUCCESS if success, AVCDEC_FAIL if no first start code is found, AVCDEC_NO_NEX_SC if\n            the first start code is found, but the second start code is missing (potential partial NAL).\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit, int *size);\n\n    /**\n    This function sniffs the nal_unit_type such that users can call corresponding APIs.\n    \\param \"bitstream\"  \"Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.).\"\n    \\param \"size\"       \"size of the bitstream (NumBytesInNALunit + 1).\"\n    \\param \"nal_unit_type\" \"Pointer to the return value of nal unit type.\"\n    \\return \"AVCDEC_SUCCESS if success, AVCDEC_FAIL otherwise.\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);\n\n    /**\n    This function decodes the sequence parameters set, initializes related parameters and\n    allocates memory (reference frames list), must also be compliant with Annex A.\n    It is equivalent to decode VOL header of MPEG4.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"nal_unit\"   \"Pointer to the buffer containing single NAL unit.\n                        The content will change due to EBSP-to-RBSP conversion.\"\n    \\param \"nal_size\"       \"size of the bitstream NumBytesInNALunit.\"\n    \\return \"AVCDEC_SUCCESS if success,\n            AVCDEC_FAIL if profile and level is not supported,\n            AVCDEC_MEMORY_FAIL if memory allocations return null.\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);\n\n    /**\n    This function returns sequence parameters such as dimension and field flag of the most recently\n    decoded SPS. 
More can be added later or grouped together into a structure. This API can be called\n    after PVAVCInitSequence. If no sequence parameter has been decoded yet, it will return AVCDEC_FAIL.\n\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"seqInfo\"    \"Pointer to the AVCDecSeqParamInfo structure.\"\n    \\return \"AVCDEC_SUCCESS if success and AVCDEC_FAIL if fail.\"\n    \\note \"This API returns the SPS Info of the most recently decoded SPS (to be used right after PVAVCDecSeqParamSet).\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo);\n\n    /**\n    This function decodes the picture parameters set and initializes related parameters. Note that\n    the PPS may not be present for every picture.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"nal_unit\"   \"Pointer to the buffer containing single NAL unit.\n                        The content will change due to EBSP-to-RBSP conversion.\"\n    \\param \"nal_size\"       \"size of the bitstream NumBytesInNALunit.\"\n    \\return \"AVCDEC_SUCCESS if success, AVCDEC_FAIL if profile and level is not supported.\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);\n\n    /**\n    This function decodes one NAL unit of bitstream. The type of nal unit is one of the\n    followings, 1, 5. 
(for now, no data partitioning, type 2,3,4).\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"nal_unit\"   \"Pointer to the buffer containing a single or partial NAL unit.\n                        The content will change due to EBSP-to-RBSP conversion.\"\n    \\param \"buf_size\"   \"Size of the buffer (less than or equal nal_size).\"\n    \\param \"nal_size\"   \"size of the current NAL unit NumBytesInNALunit.\"\n    \\return \"AVCDEC_PICTURE_READY for success and an output is ready,\n            AVCDEC_SUCCESS for success but no output is ready,\n            AVCDEC_PACKET_LOSS if GetData returns AVCDEC_PACKET_LOSS,\n            AVCDEC_FAIL if syntax error is detected,\n            AVCDEC_MEMORY_FAIL if memory is corrupted.\n            AVCDEC_NO_PICTURE if no frame memory to write to (users need to get output and/or return picture).\n            AVCDEC_REDUNDANT_PICTURE if error has been detected in the primary picture and redundant picture is available,\n            AVCDEC_CONCEALED_PICTURE if error has been detected and decoder has concealed it.\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit, int nal_size);\n\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer, int buf_size);\n\n    /**\n    Check the availability of the decoded picture in decoding order (frame_num).\n    The AVCFrameIO also provide displaying order information such that the application\n    can re-order the frame for display. A picture can be retrieved only once.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"output\"      \"Pointer to the AVCOutput structure. 
Note that decoder library will\n                        not re-used the pixel memory in this structure until it has been returned\n                        thru PVAVCReleaseOutput API.\"\n    \\return \"AVCDEC_SUCCESS for success, AVCDEC_FAIL if no picture is available to be displayed,\n            AVCDEC_PICTURE_READY if there is another picture to be displayed.\"\n    */\n    OSCL_IMPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release_flag, AVCFrameIO *output);\n\n    /**\n    This function resets the decoder and expects to see the next IDR slice.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    */\n    OSCL_IMPORT_REF void    PVAVCDecReset(AVCHandle *avcHandle);\n\n    /**\n    This function performs clean up operation including memory deallocation.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    */\n    OSCL_IMPORT_REF void    PVAVCCleanUpDecoder(AVCHandle *avcHandle);\n//AVCDec_Status EBSPtoRBSP(uint8 *nal_unit,int *size);\n\n\n\n\n    /** CALLBACK FUNCTION TO BE IMPLEMENTED BY APPLICATION */\n    /** In AVCHandle structure, userData is a pointer to an object with the following\n        member functions.\n    */\n    AVCDec_Status CBAVCDec_GetData(uint32 *userData, unsigned char **buffer, unsigned int *size);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* _AVCDEC_API_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/include/pvavcdecoder.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCDECODER_H_INCLUDED\n#define PVAVCDECODER_H_INCLUDED\n\n#ifndef PVAVCDECODERINTERFACE_H_INCLUDED\n#include \"pvavcdecoderinterface.h\"\n#endif\n\n#ifndef AVCDEC_API_H_INCLUDED\n#include \"avcdec_api.h\"\n#endif\n\n// AVC video decoder\nclass PVAVCDecoder : public PVAVCDecoderInterface\n{\n\n    public:\n        static PVAVCDecoder* New(void);\n        virtual ~PVAVCDecoder();\n        virtual int AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers);\n        virtual void AVC_FrameUnbind(int indx);\n        virtual int AVC_FrameBind(int indx, uint8** yuv);\n        virtual void    CleanUpAVCDecoder(void);\n        virtual void    ResetAVCDecoder(void);\n        virtual int32   DecodeSPS(uint8 *bitstream, int32 buffer_size);\n        virtual int32   DecodePPS(uint8 *bitstream, int32 buffer_size);\n        virtual int32   DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size);\n        virtual bool    GetDecOutput(int *indx, int *release, AVCFrameIO* output);\n        virtual void    GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right);\n\n    private:\n        PVAVCDecoder();\n\n        bool Construct(void);\n        AVCHandle 
iAvcHandle;\n        uint8*  iDPB;\n        bool*   iFrameUsed;\n        uint8** iFramePtr;\n        int     iNumFrames;\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/include/pvavcdecoder_factory.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCDECODER_FACTORY_H_INCLUDED\n#define PVAVCDECODER_FACTORY_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_MEM_H_INCLUDED\n#include \"oscl_mem.h\"\n#endif\n\nclass PVAVCDecoderInterface;\n\nclass PVAVCDecoderFactory\n{\n    public:\n        /**\n         * Creates an instance of a PVAVCDecoder. If the creation fails, this function will leave.\n         *\n         * @returns A pointer to an instance of PVAVCDecoder as PVAVCDecoderInterface reference or leaves if instantiation fails\n         **/\n        OSCL_IMPORT_REF static PVAVCDecoderInterface* CreatePVAVCDecoder(void);\n\n        /**\n         * Deletes an instance of PVAVCDecoder and reclaims all allocated resources.\n         *\n         * @param aVideoDec The PVAVCDecoder instance to be deleted\n         * @returns A status code indicating success or failure of deletion\n         **/\n        OSCL_IMPORT_REF static bool DeletePVAVCDecoder(PVAVCDecoderInterface* aVideoDec);\n};\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/include/pvavcdecoderinterface.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCDECODERINTERFACE_H_INCLUDED\n#define PVAVCDECODERINTERFACE_H_INCLUDED\n\n// includes\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef AVCDEC_API_H_INCLUDED\n#include \"avcdec_api.h\"\n#endif\n\ntypedef void (*FunctionType_Unbind)(void *, int);\ntypedef int (*FunctionType_Alloc)(void *, int, uint8 **);\ntypedef int (*FunctionType_SPS)(void *, uint, uint);\ntypedef int (*FunctionType_Malloc)(void *, int32, int);\ntypedef void(*FunctionType_Free)(void *, int);\n\n\n// PVAVCDecoderInterface pure virtual interface class\nclass PVAVCDecoderInterface\n{\n    public:\n        virtual ~PVAVCDecoderInterface() {};\n        virtual void    CleanUpAVCDecoder(void) = 0;\n        virtual void    ResetAVCDecoder(void) = 0;\n        virtual int32   DecodeSPS(uint8 *bitstream, int32 buffer_size) = 0;\n        virtual int32   DecodePPS(uint8 *bitstream, int32 buffer_size) = 0;\n        virtual int32   DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size) = 0;\n        virtual bool    GetDecOutput(int *indx, int *release, AVCFrameIO* output) = 0;\n        virtual void    GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right) = 
0;\n};\n\n#endif // PVAVCDECODERINTERFACE_H_INCLUDED\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/3GPVideoParser.cpp",
    "content": "/*\n * Copyright (C) 2009 OrangeLabs\n * 3GPVideoParser.cpp\n *\n *  Created on: 12 août 2009\n *      Author: rglt1266\n */\n#define LOG_TAG \"3GPPSampleReader\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n#include <limits.h>\n#include \"3GPVideoParser.h\"\n\n/* Variables */\nFILE* f = NULL; // File to parse\n\nuint32 TimeScale = 0; // Ticks per second\nuint32 VideoLength = 0; // Video length (time)\nuint32 VideoWidth = 0;\nuint32 VideoHeight = 0;\nchar VideoCodec[5]; // Codec type: d263/mp4v....\n\nuint32 moovAtomPtr = 0;\nuint32 moovAtomSize = 0;\nuint32 trakAtomPtr = 0;\nuint32 trakAtomSize = 0;\n\n/* Buffers and pointers*/\nuint8* moovBuff = 0;\nuint8* sttsPtr = 0;\nuint8* stcoPtr = 0;\nuint8* stszPtr = 0;\nuint8* stscPtr = 0;\nuint8* stsdPtr = 0;\nSample* samplePtr = 0;\n\n/**\n * Endien convert\n */\nuint32 EndienConvert (uint32 input){\n\treturn ((input & 0xFF) << 24) | ((input & 0xFF00) << 8) |\t((uint32)(input & 0xFF0000) >> 8) | ((uint32)(input & 0xFF000000) >> 24);\n}\n\n/**\n * Get a uint32 value at a precised position in a uint8 buffer\n */\nuint32 getUint32FromUint8Buffer (uint8* buffer,uint32 offset){\n\treturn ( ((buffer[offset]<<24)& 0xff000000) | ((buffer[offset+1]<<16)& 0xff0000) | ((buffer[offset+2]<<8)& 0xff00) | ((buffer[offset+3])& 0xff));\n}\n\n/**\n * Find a particular value in a uint8 buffer reading uint32\n */\nint32 findAtom (uint8* buffer,uint32 bufferSize, uint32 valueToFind){\n\tuint32 tmp;\n\tuint32 i = 0;\n\tfor (i=0;i<(bufferSize-4);i++){\n\t\ttmp = getUint32FromUint8Buffer(buffer,i);\n\t\tif (tmp == valueToFind){\n\t\t\treturn i-4;\n\t\t}\n\t}\n\treturn VPAtomError;\n}\n\n/**\n * Find a particular value in a uint32 buffer\n */\nint32 findAtom (uint32* buffer,uint32 bufferSize, uint32 valueToFind){\n\tuint32 i = 0;\n\tfor (i=0;i<(bufferSize);i++){\n\t\tif (EndienConvert(buffer[i]) == valueToFind){\n\t\t\treturn i;\n\t\t}\n\t}\n\treturn VPAtomError;\n}\n\n/**\n* Cleanup the 
parser\n*\n* @return error code\n*/\nint cleanupParser(void){\n\t/* Clean atom info */\n\tfree(moovBuff);\n\tVideoWidth = 0;\n\tVideoHeight = 0;\n\tVideoCodec[0] = '\\0';\n\tVideoLength = 0;\n\treturn VPAtomSucces;\n}\n\n/**\n* Init the parser\n*\n* @param filePath path of the file to read\n* @param width check if the video width is correct\n* @param heigth check if the video height is correct\n* @return error code\n*/\nint Init3GPVideoParser (char *filePath){\n\tuint32 anAtomSize = 0;\n\tuint32 anAtomType = 0;\n\tuint32 trakOffset = 0;\n\n\tint32 pos = 0;\n\tint32 fileSize;\n\n\t/* Load file */\n\tf = fopen(filePath,\"r\");\n\tif (f == NULL) {\n\t  return VPAtomError;\n\t}\n\tfseek( f, 0L, SEEK_END );\n\tfileSize = ftell( f );\n\tif (fileSize <= 8 ) return VPAtomError; // File is too small !\n\n\t/* Check if file format is correct ie it's a 3gp file*/\n\tfseek(f,4,SEEK_SET);\n\tfread(&anAtomType,sizeof(uint32),1,f);\n\tanAtomType = EndienConvert(anAtomType);\n\tif (anAtomType != AtomFtyp) return VPAtomError;\n\n\t/* Start parsing from begining*/\n\trewind (f);\n\n\t// Find Moov Atom\n\twhile (ftell(f)<fileSize){\n\t\tfread(&anAtomSize,sizeof(uint32),1,f);\n\t\tanAtomSize = EndienConvert(anAtomSize);\n\t\tfread(&anAtomType,sizeof(uint32),1,f);\n\t\tanAtomType = EndienConvert(anAtomType);\n\t\tif (anAtomType == AtomMoov){\n\t\t\tmoovAtomPtr=ftell(f)-8;\n\t\t\tmoovAtomSize=anAtomSize;\n\t\t}\n\t\t// Switch to next Atom\n\t\tfseek(f,anAtomSize-8,SEEK_CUR);/* -8 is because we already read 2*4 Bytes of this Atom*/\n\t}\n\n\t/* Copy moov to buffer */\n\tmoovBuff = (uint8*)malloc(moovAtomSize);\n\tfseek(f,moovAtomPtr,SEEK_SET);\n\tfor (uint32 j=0;j<(moovAtomSize);j++){\n\t\tfread(&moovBuff[j],1,1,f);\n\t}\n\n\t// Find trak(s) Atom\n\tpos = findAtom(moovBuff,moovAtomSize,AtomTrak);\n\twhile (pos > 0) {\n\t\tint32 trakSize = getUint32FromUint8Buffer(moovBuff,pos);\n\t\tif (findAtom(moovBuff+pos,trakSize,AtomVmhd)){\n\t\t\ttrakAtomPtr = moovAtomPtr+pos;\n\t\t\ttrakAtomSize = 
trakSize;\n\t\t\tbreak;\n\t\t} else {\n\t\t\t// This is not the videotrack\n\t\t}\n\t\t// Trying to find new trak\n\t\tpos = findAtom(moovBuff+pos,moovAtomSize-pos,AtomTrak);\n\t}\n\tif (trakAtomPtr == 0) {\n\t    return VPAtomError;\n\t}\n\n\n\ttrakOffset = trakAtomPtr - moovAtomPtr;\n\n\t// Find MDHD\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomMdhd);\n\tif (pos > 0){\n\t\tuint8* Ptr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t\tTimeScale = getUint32FromUint8Buffer(Ptr,4);\n\t\tVideoLength = getUint32FromUint8Buffer(Ptr,8);\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\n\t// Find STTS\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStts);\n\tif (pos > 0){\n\t\tsttsPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\n\t// Find STSZ\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsz);\n\tif (pos > 0){\n\t\tstszPtr = moovBuff + trakOffset + pos + 20; // Skip Atom size and Atom name\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STCO\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStco);\n\tif (pos > 0){\n\t\tstcoPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STSC\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsc);\n\tif (pos > 0){\n\t\tstscPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STSD\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsd);\n\tif (pos > 0){\n\t\tstsdPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t\tVideoWidth = (getUint32FromUint8Buffer(stsdPtr,32)>>16) & 0xFFFF;\n\t\tVideoHeight = getUint32FromUint8Buffer(stsdPtr,32) & 0xFFFF;\n\t\tVideoCodec[0] = *(stsdPtr+90);\n\t\tVideoCodec[1] = *(stsdPtr+91);\n\t\tVideoCodec[2] = *(stsdPtr+92);\n\t\tVideoCodec[3] = *(stsdPtr+93);\n\t\tVideoCodec[4]= '\\0';\n\t} else {\n\t      
return VPAtomError;\n\t}\n\n\n\t/**\n\t * Prepare Sample list\n\t */\n\tuint32 countChunk = 0; // Total number of chunk\n\tuint32 currChunk=0; // Counter for current chunk\n\tuint32 currChunkInStsc=0; // Current chunk described in stsc Atom\n\tuint32 ChunkAddr = 0; // Current chunk offset\n\tuint32 countSample = 0; // Counter for sample in a chunk\n\tuint32 currSample = 0; // Counter for current sample (/total sample in file)\n\tuint32 SamplePerChunk = 0; // Value sample per chunk\n\tuint32 currStscPos = 0; // Current stsc table\n\tuint32 Offset = 0; // Offset from ChunkAddr to sample data start\n\tint32 currSttsPos = 0;\n\tuint32 SameTimestampCount = 0; // For case where n sample have the same timestamp\n\tuint32 temp;\n\tSample* currSamplePtr = 0; // Pointer to current Sample\n\tSample* aSample = 0; // Current Sample element\n\tbool initList = false; // Boolean changed after first sample is read\n\n\t/* Get \"Number of entries\" field of stco atom */\n\tcountChunk = getUint32FromUint8Buffer(stcoPtr-4,0);\n\t/* Init currChunk */\n\tcurrChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12);\n\n\tfor (currChunk=0;currChunk<countChunk;currChunk++){\n\t\tChunkAddr = getUint32FromUint8Buffer(stcoPtr,currChunk*4);\n\t\tif (currChunkInStsc == currChunk+1){\n\t\t\tSamplePerChunk = getUint32FromUint8Buffer(stscPtr,currStscPos*12+4);\n\t\t\tcurrStscPos++;\n\t\t\tcurrChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12);\n\t\t} else {\n\t\t\t// Repeat old value\n\t\t}\n\t\tOffset = 0;\n\t\tfor (countSample=0;countSample<SamplePerChunk;countSample++){\n\t\t\t/* Malloc a new sample */\n\t\t\taSample = (Sample*)malloc(sizeof(Sample));\n\t\t\t/* Get sample size */\n\t\t\taSample->size = getUint32FromUint8Buffer(stszPtr,currSample*4);\n\t\t\tcurrSample++;\n\t\t\t/* Get sample addr */\n\t\t\taSample->addr = ChunkAddr + Offset;\n\t\t\tOffset = Offset + aSample->size;\n\t\t\t/* Get sample timestamp */\n\t\t\tif (SameTimestampCount == 0){\n\t\t\t\t// Read new stts 
element\n\t\t\t\tSameTimestampCount = getUint32FromUint8Buffer(sttsPtr,currSttsPos*8);\n\t\t\t\tcurrSttsPos++;\n\t\t\t}\n\t\t\ttemp = getUint32FromUint8Buffer(sttsPtr,(currSttsPos-1)*8+4);\n\t\t\taSample->timestamp = (uint32)((temp*1000)/TimeScale);\n\t\t\tSameTimestampCount--;\n\t\t\t/* Set next to NULL */\n\t\t\taSample->next = NULL;\n\t\t\t/* Update the sample list */\n\t\t\tif (initList == false){\n\t\t\t\tsamplePtr = aSample;\n\t\t\t\tcurrSamplePtr = aSample;\n\t\t\t\tinitList = true;\n\t\t\t} else {\n\t\t\t\tcurrSamplePtr->next = aSample;\n\t\t\t\tcurrSamplePtr = aSample;\n\t\t\t\tcurrSamplePtr->next = NULL;\n\t\t\t}\n\t\t}\n\t}\n\treturn VPAtomSucces;\n}\n\n/**\n* Get Videoframe\n*\n* @param aOutBuffer buffer to write the videoframe\n* @param aBufferSize size of the buffer\n* @param aTimestamp timestamp\n* @return error code for overrun buffer\n*/\nint getFrame (uint8* aOutBuffer,uint32* aBufferSize, uint32* aTimestamp){\n\t// Temp sample to free data\n\tSample* tmp;\n\tif (samplePtr != NULL){\n\t\tif (aOutBuffer == NULL || f==NULL){\n\t\t    return VPAtomError;\n\t\t}\n\t\tfseek(f,samplePtr->addr,SEEK_SET);\n\t\tif (fread(aOutBuffer,1,samplePtr->size,f) != samplePtr->size){\n\t\t\treturn VPAtomError;\n\t\t}\n\t\t*aTimestamp = samplePtr->timestamp;\n\t\t*aBufferSize = samplePtr->size;\n\t\t/* Free the sample */\n\t\ttmp = samplePtr;\n\t\tsamplePtr = samplePtr->next;\n\t\tfree(tmp);\n\t\treturn VPAtomSucces;\n\t} else {\n\t\taOutBuffer = NULL;\n\t\t*aBufferSize = 0;\n\t\t*aTimestamp = 0;\n\t\treturn VPAtomError;\n\t}\n}\n\n/**\n * Release file by closing it\n *\n * @return error code\n */\nint release(){\n\tif (f != NULL){\n\t\tfclose(f);\n\t}\n\treturn cleanupParser();\n}\n\n/**\n * Get the video duration\n *\n * @return video duration in seconds ( last 3 digits are ms)\n */\nuint32 getVideoDuration (){\n\tuint32 retValue = 0;\n\tretValue = ((VideoLength/TimeScale)*1000)+(VideoLength%TimeScale);\n\treturn retValue;\n}\n\n/**\n * Get the video codec\n *\n * 
@return video codec string\n */\nchar* getVideoCodec (){\n\treturn VideoCodec;\n}\n\n/**\n * Get video width\n *\n * @return video width\n */\nuint32 getVideoWidth (){\n\treturn VideoWidth;\n}\n\n/**\n * Get the video height\n *\n * @return video height\n */\nuint32 getVideoHeight(){\n\treturn VideoHeight;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/3GPVideoParser.h",
    "content": "/*\n * Copyright (C) 2009 OrangeLabs\n * 3GPVideoParser.h\n *\n *  Created on: 12 août 2009\n *      Author: rglt1266\n */\n\n#ifndef _3GPVIDEOPARSER_H_\n#define _3GPVIDEOPARSER_H_\n\n/* Define new types */\ntypedef unsigned char uint8;\ntypedef unsigned short uint16;\ntypedef short int16;\ntypedef unsigned long uint32;\ntypedef long int32;\n\n#define DEBUG 1;\n\n/* Define important atoms 4Bytes code (char)*/\n#define\tAtomFtyp 0x66747970 /* File type compatibility atom */\n#define\tAtomMdat 0x6D646174 /* Movie sample data atom */\n#define\tAtomMoov 0x6D6F6F76 /* Movie ressource metadata atom */\n#define\tAtomMdhd 0x6D646864 /* Video media information header atom */\n#define\tAtomMvhd 0x6D766864 /* Video media information header atom */\n#define\tAtomStts 0x73747473 /* Time-to-sample atom */\n#define\tAtomStco 0x7374636F /* Sample-to-chunck atom */\n#define\tAtomTrak 0x7472616B /* Trak atom */\n#define\tAtomStsz 0x7374737A /* Sample size atom */\n#define AtomStsc 0x73747363 /* Nb of sample per chunck */\n#define AtomStsd 0x73747364 /* Nb of sample per chunck */\n#define AtomVmhd 0x766D6864 /* Identifier of a video track */\n\n/* Define error codes */\n#define VPAtomError 0\n#define VPAtomSucces 1\n\ntypedef struct {\n\tuint32 ptr;\n\tuint32 size;\n} Atom;\n\nstruct sample {\n\tuint32 addr;\n\tuint32 size;\n\tuint32 timestamp;\n\tstruct sample *next;\n};\ntypedef struct sample Sample;\n\nint Init3GPVideoParser (char *);\nint release();\nint getFrame (uint8*,uint32*, uint32*);\nuint32 getVideoDuration();\nuint32 getVideoWidth();\nuint32 getVideoHeight();\nchar* getVideoCodec();\n\n#endif /* 3GPVIDEOPARSER_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/NativeH264Decoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 2009 OrangeLabs\n *\n * Author: Alexis Gilabert Senar\n * Date: 2009-07-01\n * -------------------------------------------------------------------\n */\n#define LOG_TAG \"NativeDec\"\n#include \"android/log.h\"\n#include \"NativeH264Decoder.h\"\n#include \"pvavcdecoder.h\"\n#include \"3GPVideoParser.h\"\n#include \"yuv2rgb.h\"\n\n// xxx pa try to read nal unit type\n#include \"avcdec_api.h\"\n\n\nint     iSrcWidth = 352;\nint     iSrcHeight = 288;\n\n\n#define MB_BASED_DEBLOCK\n\ntypedef enum {\n  SPS,\n  PPS,\n  SLICE\n} DEC_STATE;\n\n/*\n * Global variables\n *\n*/\n\n  PVAVCDecoder *decoder;\n  int parserInitialized = 0;\n  int decoderInitialized = 0;\n\n  uint8*        aOutBuffer;\n  uint8*        aInputBuf;\n\n  DEC_STATE         state;\n  AVCFrameIO outVid;\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    InitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitDecoder\n  (JNIEnv * env, jclass clazz){\n\n  state = SPS;\n  aOutBuffer = (uint8*)malloc(iSrcWidth*iSrcHeight*3/2);\n  decoder = PVAVCDecoder::New();\n  return (decoder!=NULL)?1:0;\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    DeinitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitDecoder\n  (JNIEnv * env, jclass clazz){\n    state = SPS;\n    free(aOutBuffer);\n    delete(decoder);\n    return 1;\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    DecodeAndConvert\n * Signature: ([B[IJ)[I\n */\nJNIEXPORT jint JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DecodeAndConvert\n  (JNIEnv *env, jclass clazz, jbyteArray h264Frame, jintArray decoded)\n{\n  int32 size = 0;\n  int32 status;\n  int                 indexFrame;\n  int                 releaseFrame;\n  /* Set volbuf with h263Frame data*/\n  jint len = env->GetArrayLength(h264Frame);\n  jbyte data[len];\n  env->GetByteArrayRegion(h264Frame, 0, len, data);\n\n  aInputBuf = (uint8*)malloc(len);\n  memcpy(aInputBuf,(uint8*)data,len);\n  size = len;\n\n  // xxx pa try to read nal unit type\n  int nal_unit_type = (AVCNalUnitType)(aInputBuf[0] & 0x1F);\n  __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"nal_unit_type : %d\", nal_unit_type);\n\n  // xxx pa new switch based on incoming NAL Units instead of fixed state machine\n  // this is the only approach to react on SPS/PPS which are part of in-band parameter settings (sended in between)\n  switch (nal_unit_type){\n    case AVC_NALTYPE_SPS:\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: AVC_NALTYPE_SPS\");\n\n    // xxx pa Reset decoder, prepare it for a new IDR frame.\n    // decoder->ResetAVCDecoder();\n    \n      // ===========>\n      if (decoder->DecodeSPS(aInputBuf,size)==AVCDEC_SUCCESS){\n        \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: state: SPS->PPS\");\n        state = PPS;\n      } else {\n        return 0;\n      }\n      break;\n    case AVC_NALTYPE_PPS:\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: AVC_NALTYPE_PPS\");\n        \n        if (state != PPS) {\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"BREAK not in state: PPS\");\n\n            break;\n        }\n      // ===========>\n      if (decoder->DecodePPS(aInputBuf,size)==AVCDEC_SUCCESS){\n      \n        state = SLICE;\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: state: PPS->SLICE\");\n      } else {\n      \n        // xxx pa reset state to SPS lookup\n 
       __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: reset state: PPS->SPS\");\n\n        state = SPS;\n\n        return 0;\n      }\n      break;\n    case AVC_NALTYPE_IDR :\n        // xxx pa Reset decoder, prepare it for a new IDR frame.\n        decoder->ResetAVCDecoder();\n        // don't break\n    case AVC_NALTYPE_SLICE :\n\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: AVC_NALTYPE_SLICE or AVC_NALTYPE_IDR\");\n\n        // ===========>\n        if ((status=decoder->DecodeAVCSlice(aInputBuf,&size))>AVCDEC_FAIL)\n        {\n        \n          // ===========>\n          // decoder->GetDecOutput(&indexFrame,&releaseFrame,&outVid);\n          // xxx pa react on dbp:: DPBInitBuffer return AVC_NO_BUFFER failures (which should not happen)\n          if ((decoder->GetDecOutput(&indexFrame,&releaseFrame,&outVid) == AVC_NO_BUFFER))\n          {\n            __android_log_print(ANDROID_LOG_ERROR, LOG_TAG,  \"decode: GetDecOutput failed\");\n            return 0;\n          }\n        \n          if (releaseFrame == 1){\n            // ===========>\n            decoder->AVC_FrameUnbind(indexFrame);\n          }\n\n          /* Copy result to YUV  array ! 
*/\n          memcpy(aOutBuffer,outVid.YCbCr[0],iSrcWidth*iSrcHeight);\n          memcpy(aOutBuffer+(iSrcWidth*iSrcHeight),outVid.YCbCr[1],(iSrcWidth*iSrcHeight)/4);\n          memcpy(aOutBuffer+(iSrcWidth*iSrcHeight)+((iSrcWidth*iSrcHeight)/4),outVid.YCbCr[2],(iSrcWidth*iSrcHeight)/4);\n          \n          /* Create the output buffer */\n          uint32* resultBuffer= (uint32*) malloc(iSrcWidth*iSrcHeight*sizeof(uint32));\n          if (resultBuffer == NULL) \n            return 0;\n          \n          /**********  Convert to rgb  ***********/\n          convert(iSrcWidth,iSrcHeight,aOutBuffer,resultBuffer);\n          \n          /* Return Bitmap image */\n          (env)->SetIntArrayRegion(decoded, 0, iSrcWidth*iSrcHeight, (const jint*)resultBuffer);\n          free(resultBuffer);\n          \n        } else {\n          __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"status: %ld\",status);\n        }\n      break;\n    default:\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"decode: UNKNOWN NAL unit type: %d\", nal_unit_type);\n\n  }\n  return 1;\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    InitParser\n * Signature: (Ljava/lang/String;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitParser\n  (JNIEnv *env, jclass clazz, jstring pathToFile){\n\n  return 0;\n\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    DeinitParser\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitParser\n  (JNIEnv *env, jclass clazz){\n        parserInitialized = 0;\n        return release();\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoLength\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoLength\n  (JNIEnv *env, jclass clazz)\n{\n  jint videoLength = getVideoDuration();\n  return videoLength;\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoWidth\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoWidth\n  (JNIEnv *env, jclass clazz)\n{\n    return getVideoWidth();\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoHeight\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoHeight\n  (JNIEnv *env, jclass clazz)\n{\n    return getVideoHeight();\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoCoding\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoCoding\n  (JNIEnv *env, jclass clazz)\n{\n  char* charVideoCoding = getVideoCodec();\n  jstring stringVideoCoding = (env)->NewStringUTF(charVideoCoding);\n  return stringVideoCoding;\n}\n\n/**\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoSample\n * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoSample\n */\nJNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoSample\n  (JNIEnv *env, jclass clazz, jintArray Decoded)\n{\n        jobject object = NULL;\n        // Return created object\n        return object;\n}\n\n/**\n * This is called by the VM when the shared library is first loaded.\n */\njint JNI_OnLoad(JavaVM* vm, void* reserved) {\n    
JNIEnv* env = NULL;\n    jint result = -1;\n    if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {\n        goto bail;\n    }\n    /* success -- return valid version number */\n    result = JNI_VERSION_1_4;\nbail:\n    return result;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/NativeH264Decoder.h",
    "content": "/* DO NOT EDIT THIS FILE - it is machine generated */\n#include <jni.h>\n/* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder */\n\n#ifndef _Included_NativeH264Decoder\n#define _Included_NativeH264Decoder\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    InitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitDecoder\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    DeinitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitDecoder\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    DecodeAndConvert\n * Signature: ([B[IJ)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DecodeAndConvert\n  (JNIEnv *, jclass, jbyteArray, jintArray);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    InitParser\n * Signature: (Ljava/lang/String;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_InitParser\n  (JNIEnv *, jclass, jstring);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    DeinitParser\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_DeinitParser\n  (JNIEnv *, jclass);\n\n/*\n * Class:     
com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    getVideoLength\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoLength\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    getVideoWidth\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoWidth\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    getVideoHeight\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoHeight\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    getVideoCoding\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoCoding\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder\n * Method:    getVideoSample\n * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/decoder/VideoSample;\n */\nJNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_decoder_NativeH264Decoder_getVideoSample\n  (JNIEnv *, jclass, jintArray);\n\n#ifdef __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/avc_bitstream.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_bitstream.h\"\n\n/* Swapping may not be needed anymore since we read one byte at a time and perform\nEBSP to RBSP conversion in bitstream. */\n#ifdef LITTLE_ENDIAN\n#if (WORD_SIZE==32)  /* this can be replaced with assembly instructions */\n#define SWAP_BYTES(x) ((((x)&0xFF)<<24) | (((x)&0xFF00)<<8) | (((x)&0xFF0000)>>8) | (((x)&0xFF000000)>>24))\n#else  /* for 16-bit */\n#define SWAP_BYTES(x) ((((x)&0xFF)<<8) | (((x)&0xFF00)>>8))\n#endif\n#else\n#define SWAP_BYTES(x) (x)\n#endif\n\n\n/* array for trailing bit pattern as function of number of bits */\n/* the first one is unused. 
*/\nconst static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};\n\n/* ======================================================================== */\n/*  Function : BitstreamInit()                                              */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Populate bitstream structure with bitstream buffer and size  */\n/*             it also initializes internal data                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if failed.              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n/* |--------|--------|----~~~~~-----|---------|---------|---------|\n   ^                                          ^read_pos           ^data_end_pos\n   bitstreamBuffer                  <--------->\n                                    current_word\n\n   |xxxxxxxxxxxxx----|  = current_word 32 or 16 bits\n    <------------>\n     bit_left\n ======================================================================== */\n\n\n/* ======================================================================== */\n/*  Function : BitstreamNextWord()                                          */\n/*  Date     : 12/4/2003                                                    */\n/*  Purpose  : Read up to machine word.                                     */\n/*  In/out   :                                                              */\n/*  Return   : Next word with emulation prevention code removed. 
Everything\n    in the bitstream structure got modified except current_word             */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nAVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size)\n{\n    EBSPtoRBSP(buffer, &size);\n\n    stream->incnt = 0;\n    stream->incnt_next = 0;\n    stream->bitcnt = 0;\n    stream->curr_word = stream->next_word = 0;\n    stream->read_pos = 0;\n\n    stream->bitstreamBuffer = buffer;\n\n    stream->data_end_pos = size;\n\n    stream->nal_size = size;\n\n    return AVCDEC_SUCCESS;\n}\n/* ======================================================================== */\n/*  Function : AVC_BitstreamFillCache()                                         */\n/*  Date     : 1/1/2005                                                     */\n/*  Purpose  : Read up to machine word.                                     */\n/*  In/out   :                                                              */\n/*  Return   : Read in 4 bytes of input data                                */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nAVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream)\n{\n    uint8 *bitstreamBuffer = stream->bitstreamBuffer;\n    uint8 *v;\n    int num_bits, i;\n\n    stream->curr_word |= (stream->next_word >> stream->incnt);   // stream->incnt cannot be 32\n    stream->next_word <<= (31 - stream->incnt);\n    stream->next_word <<= 1;\n    num_bits = stream->incnt_next + stream->incnt;\n    if (num_bits >= 32)\n    {\n        stream->incnt_next -= (32 - stream->incnt);\n        stream->incnt = 32;\n        return AVCDEC_SUCCESS;\n    }\n    /* this check can be removed if there is additional extra 4 bytes at the end of the bitstream */\n    v = bitstreamBuffer + 
stream->read_pos;\n\n    if (stream->read_pos > stream->data_end_pos - 4)\n    {\n        if (stream->data_end_pos <= stream->read_pos)\n        {\n            stream->incnt = num_bits;\n            stream->incnt_next = 0;\n            return AVCDEC_SUCCESS;\n        }\n\n        stream->next_word = 0;\n\n        for (i = 0; i < stream->data_end_pos - stream->read_pos; i++)\n        {\n            stream->next_word |= (v[i] << ((3 - i) << 3));\n        }\n\n        stream->read_pos = stream->data_end_pos;\n        stream->curr_word |= (stream->next_word >> num_bits); // this is safe\n\n        stream->next_word <<= (31 - num_bits);\n        stream->next_word <<= 1;\n        num_bits = i << 3;\n        stream->incnt += stream->incnt_next;\n        stream->incnt_next = num_bits - (32 - stream->incnt);\n        if (stream->incnt_next < 0)\n        {\n            stream->incnt +=  num_bits;\n            stream->incnt_next = 0;\n        }\n        else\n        {\n            stream->incnt = 32;\n        }\n        return AVCDEC_SUCCESS;\n    }\n\n    stream->next_word = ((uint32)v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];\n    stream->read_pos += 4;\n\n    stream->curr_word |= (stream->next_word >> num_bits); // this is safe\n    stream->next_word <<= (31 - num_bits);\n    stream->next_word <<= 1;\n    stream->incnt_next += stream->incnt;\n    stream->incnt = 32;\n    return AVCDEC_SUCCESS;\n\n}\n/* ======================================================================== */\n/*  Function : BitstreamReadBits()                                          */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Read up to machine word.                                     
*/\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits   */\n/*              is greater than the word-size, AVCDEC_PACKET_LOSS or        */\n/*              AVCDEC_NO_DATA if callback to get data fails.               */\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code)\n{\n    if (stream->incnt < nBits)\n    {\n        /* frame-based decoding */\n        AVC_BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word >> (32 - nBits);\n    BitstreamFlushBits(stream, nBits);\n    return AVCDEC_SUCCESS;\n}\n\n\n\n/* ======================================================================== */\n/*  Function : BitstreamShowBits()                                          */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Show up to machine word without advancing the pointer.       */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits   */\n/*              is greater than the word-size, AVCDEC_NO_DATA if it needs   */\n/*              to callback to get data.                                    
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code)\n{\n    if (stream->incnt < nBits)\n    {\n        /* frame-based decoding */\n        AVC_BitstreamFillCache(stream);\n    }\n\n    *code = stream->curr_word >> (32 - nBits);\n\n    return AVCDEC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamRead1Bit()                                          */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Read 1 bit from the bitstream.                               */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits   */\n/*              is greater than the word-size, AVCDEC_PACKET_LOSS or        */\n/*              AVCDEC_NO_DATA if callback to get data fails.               
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nAVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code)\n{\n    if (stream->incnt < 1)\n    {\n        /* frame-based decoding */\n        AVC_BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word >> 31;\n    BitstreamFlushBits(stream, 1);\n    return AVCDEC_SUCCESS;\n}\n\n\n\nAVCDec_Status BitstreamByteAlign(AVCDecBitstream  *stream)\n{\n    uint n_stuffed;\n\n    n_stuffed = (8 - (stream->bitcnt & 0x7)) & 0x7; /*  07/05/01 */\n\n    stream->bitcnt += n_stuffed;\n    stream->incnt -= n_stuffed;\n\n    if (stream->incnt < 0)\n    {\n        stream->bitcnt += stream->incnt;\n        stream->incnt = 0;\n    }\n    stream->curr_word <<= n_stuffed;\n    return AVCDEC_SUCCESS;\n}\n\n/* check whether there are more RBSP data. */\n/* ignore the emulation prevention code, assume it has been taken out. */\nbool more_rbsp_data(AVCDecBitstream *stream)\n{\n    int total_bit_left;\n    uint code;\n\n    if (stream->read_pos >= stream->nal_size)\n    {\n        total_bit_left = stream->incnt_next + stream->incnt;\n        if (total_bit_left <= 0)\n        {\n            return FALSE;\n        }\n        else if (total_bit_left <= 8)\n        {\n            BitstreamShowBits(stream, total_bit_left, &code);\n            if (code == trailing_bits[total_bit_left])\n            {\n                return FALSE;\n            }\n        }\n    }\n\n    return TRUE;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/avcdec_api.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains application function interfaces to the AVC decoder library.\n@publishedAll\n*/\n\n#include \"oscl_types.h\"\n#include \"oscl_mem.h\"\n#include \"avcdec_api.h\"\n#include \"avcdec_lib.h\"\n#include \"avcdec_bitstream.h\"\n\n// xxx pa\n#define LOG_TAG \"avcdec_api\"\n#include \"android/log.h\"\n\n\n/* ======================================================================== */\n/*  Function : EBSPtoRBSP()                                                 */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Convert EBSP to RBSP and overwrite it.                       */\n/*             Assuming that forbidden_zero, nal_ref_idc and nal_unit_type  */\n/*          (first byte), has been taken out of the nal_unit.               
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n/**\n@pseudocode \"\n    NumBytesInRBSP = 0;\n    for(i=0:i< *size; i++){\n        if(i+2 < *size && next_bits(24)==0x000003){\n            rbsp_byte[NumBytesInRBSP++];\n            rbsp_byte[NumBytesInRBSP++];\n            i+=2;\n            emulation_prevention_three_byte (0x03)\n        }\n        else\n            rbsp_byte[NumBytesInRBSP++];\n    }\"\n*/\nAVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size)\n{\n    int i, j;\n    int count = 0;\n\n    /* This code is based on EBSPtoRBSP of JM */\n    j = 0;\n\n    for (i = 0; i < *size; i++)\n    {\n        if (count == 2 && nal_unit[i] == 0x03)\n        {\n            i++;\n            count = 0;\n        }\n        nal_unit[j] = nal_unit[i];\n        if (nal_unit[i] == 0x00)\n            count++;\n        else\n            count = 0;\n        j++;\n    }\n\n    *size = j;\n\n    return AVCDEC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCAnnexBGetNALUnit()                                      */\n/*  Date     : 11/3/2003                                                    */\n/*  Purpose  : Parse a NAL from byte stream format.                         */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.                 
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n/**\n@pseudocode \"\n    byte_stream_nal_unit(NumBytesInNalunit){\n    while(next_bits(24) != 0x000001)\n        zero_byte\n    if(more_data_in_byte_stream()){\n        start_code_prefix_one_3bytes // equal 0x000001\n        nal_unit(NumBytesInNALunit)\n    }\n   }\"\n*/\nOSCL_EXPORT_REF AVCDec_Status PVAVCAnnexBGetNALUnit(uint8 *bitstream, uint8 **nal_unit,\n        int *size)\n{\n    int i, j, FoundStartCode = 0;\n    int end;\n\n    i = 0;\n    while (bitstream[i] == 0 && i < *size)\n    {\n        i++;\n    }\n    if (i >= *size)\n    {\n        *nal_unit = bitstream;\n        return AVCDEC_FAIL; /* cannot find any start_code_prefix. */\n    }\n    else if (bitstream[i] != 0x1)\n    {\n        i = -1;  /* start_code_prefix is not at the beginning, continue */\n    }\n\n    i++;\n    *nal_unit = bitstream + i; /* point to the beginning of the NAL unit */\n\n    j = end = i;\n    while (!FoundStartCode)\n    {\n        while ((j + 1 < *size) && (bitstream[j] != 0 || bitstream[j+1] != 0))  /* see 2 consecutive zero bytes */\n        {\n            j++;\n        }\n        end = j;   /* stop and check for start code */\n        while (j + 2 < *size && bitstream[j+2] == 0) /* keep reading for zero byte */\n        {\n            j++;\n        }\n        if (j + 2 >= *size)\n        {\n            *size -= i;\n            return AVCDEC_NO_NEXT_SC;  /* cannot find the second start_code_prefix */\n        }\n        if (bitstream[j+2] == 0x1)\n        {\n            FoundStartCode = 1;\n        }\n        else\n        {\n            /* could be emulation code 0x3 */\n            j += 2; /* continue the search */\n        }\n    }\n\n    *size = end - i;\n\n    return AVCDEC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCGetNALType()         
                                   */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Sniff NAL type from the bitstream                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.                 */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCDec_Status PVAVCDecGetNALType(uint8 *bitstream, int size,\n        int *nal_type, int *nal_ref_idc)\n{\n    int forbidden_zero_bit;\n    if (size > 0)\n    {\n        forbidden_zero_bit = bitstream[0] >> 7;\n        if (forbidden_zero_bit != 0)\n            return AVCDEC_FAIL;\n        *nal_ref_idc = (bitstream[0] & 0x60) >> 5;\n        *nal_type = bitstream[0] & 0x1F;\n        return AVCDEC_SUCCESS;\n    }\n\n    return AVCDEC_FAIL;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCDecSeqParamSet()                                        */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Initialize sequence, memory allocation if necessary.         */\n/*  In/out   :                                                              */\n/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.                 
*/
/*  Modified :                                                              */
/* ======================================================================== */

/**
 * Decode a Sequence Parameter Set NAL unit.
 *
 * On the first call (avcHandle->AVCObject == NULL) this also allocates the
 * decoder object, common state, bitstream wrapper, slice header and DPB via
 * the application-supplied CBAVC_Malloc callback. Subsequent calls reuse them.
 *
 * @param avcHandle  decoder handle holding callbacks and the decoder object
 * @param nal_unit   SPS NAL unit, byte 0 being the NAL header
 * @param nal_size   size of nal_unit in bytes
 * @return AVCDEC_SUCCESS, AVCDEC_MEMORY_FAIL on allocation failure,
 *         AVCDEC_FAIL on a bad NAL header/type, or the DecodeSPS status.
 *
 * NOTE(review): on the failure paths below, earlier allocations are not freed
 * before returning — the partially-built AVCObject is leaked until the app
 * tears the handle down. Verify the caller's cleanup path covers this.
 */
OSCL_EXPORT_REF AVCDec_Status   PVAVCDecSeqParamSet(AVCHandle *avcHandle, uint8 *nal_unit,
        int nal_size)
{

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet");

    AVCDec_Status status;
    AVCDecObject *decvid;
    AVCCommonObj *video;
    AVCDecBitstream *bitstream;
    void *userData = avcHandle->userData;
    bool  first_seq = FALSE;
    int i;


    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, "PVAVCDecSeqParamSet", -1, -1);

    /* First SPS ever seen: build the decoder object tree via the app's
       allocator callback. */
    if (avcHandle->AVCObject == NULL)
    {

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (AVCObject == NULL)");

        first_seq = TRUE;

        //avcHandle->memory_usage = 0;
        /* allocate AVCDecObject */
        avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecObject), 0/*DEFAULT_ATTR*/);
        if (avcHandle->AVCObject == NULL)
        {

            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (AVCObject == NULL) return: AVCDEC_MEMORY_FAIL");

            return AVCDEC_MEMORY_FAIL;
        }

        decvid = (AVCDecObject*) avcHandle->AVCObject;

        oscl_memset(decvid, 0, sizeof(AVCDecObject));

        decvid->common = (AVCCommonObj*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), 0);
        if (decvid->common == NULL)
        {
            /* NOTE(review): avcHandle->AVCObject allocated above is not freed here. */
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (common == NULL) return: AVCDEC_MEMORY_FAIL");

            return AVCDEC_MEMORY_FAIL;
        }

        video = decvid->common;
        oscl_memset(video, 0, sizeof(AVCCommonObj));

        video->seq_parameter_set_id = 9999; /* set it to some illegal value */

        decvid->bitstream = (AVCDecBitstream *) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecBitstream), 1/*DEFAULT_ATTR*/);
        if (decvid->bitstream == NULL)
        {

            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (bitstream == NULL) return: AVCDEC_MEMORY_FAIL");

            return AVCDEC_MEMORY_FAIL;
        }

        decvid->bitstream->userData = avcHandle->userData; /* callback for more data */
        decvid->avcHandle = avcHandle;
        decvid->debugEnable = avcHandle->debugEnable;
    }

    decvid = (AVCDecObject*) avcHandle->AVCObject;
    video = decvid->common;
    bitstream = decvid->bitstream;

    /* check if we can reuse the memory without re-allocating it. */
    /* always check if(first_seq==TRUE) */

    /* Conversion from EBSP to RBSP */
    /* Parse and validate the one-byte NAL header: forbidden_zero_bit must be 0
       and the type must be SPS. */
    video->forbidden_bit = nal_unit[0] >> 7;

    if (video->forbidden_bit)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (forbidden_bit) return: AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5;
    video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F);

    if (video->nal_unit_type != AVC_NALTYPE_SPS) /* not a SPS NAL */
    {

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (nal_unit_type != AVC_NALTYPE_SPS) return: AVCDEC_FAIL");

        return AVCDEC_FAIL;
    }

    /* Initialize bitstream structure*/
    BitstreamInit(bitstream, nal_unit + 1, nal_size - 1);

    /* if first_seq == TRUE, allocate the following memory  */
    if (first_seq == TRUE)
    {
        video->currSeqParams = NULL; /* initialize it to NULL */
        video->currPicParams = NULL;

        /* There are 32 pointers to sequence param set, seqParams.
                There are 255 pointers to picture param set, picParams.*/
        for (i = 0; i < 32; i++)
            decvid->seqParams[i] = NULL;

        for (i = 0; i < 256; i++)
            decvid->picParams[i] = NULL;

        video->MbToSliceGroupMap = NULL;

        video->mem_mgr_ctrl_eq_5 = FALSE;
        video->newPic = TRUE;
        video->newSlice = TRUE;

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet video->newSlice = TRUE");

        video->currPic = NULL;
        video->currFS = NULL;
        video->prevRefPic = NULL;

        video->mbNum = 0; // MC_Conceal
        /*  Allocate sliceHdr. */

        video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), 5/*DEFAULT_ATTR*/);
        if (video->sliceHdr == NULL)
        {
            /* NOTE(review): log text says AVCDEC_FAIL but the code returns
               AVCDEC_MEMORY_FAIL (here and below). */
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (sliceHdr == NULL) return: AVCDEC_FAIL");

            return AVCDEC_MEMORY_FAIL;
        }

        video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), 3/*DEFAULT_ATTR*/);
        if (video->decPicBuf == NULL)
        {

            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (decPicBuf == NULL) return: AVCDEC_FAIL");

            return AVCDEC_MEMORY_FAIL;
        }
        oscl_memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer));
    }

    /* Decode SPS, allocate video->seqParams[i] and assign video->currSeqParams */
    status = DecodeSPS(decvid, bitstream);

    if (status != AVCDEC_SUCCESS)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet (status != AVCDEC_SUCCESS) return: status: %d", status);

        return status;
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecSeqParamSet return: status: AVCDEC_SUCCESS");

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecGetSeqInfo()                                         */
/*  Date     : 11/4/2003
   */
/*  Purpose  : Get sequence parameter info of the last decoded SPS          */
/*  In/out   :                                                              */
/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.                 */
/*  Modified :                                                              */
/*  12/20/03:  change input argument, use structure instead.                */
/* ======================================================================== */

/**
 * Fill *seqInfo (dimensions, cropping window, DPB frame count) from the most
 * recently decoded SPS (decvid->lastSPS).
 *
 * @param avcHandle  decoder handle; must already hold a decoded SPS
 * @param seqInfo    out: frame width/height (in pixels), crop rectangle as
 *                   first/last pixel coordinates, and num_frames for the DPB
 * @return AVCDEC_SUCCESS, or AVCDEC_FAIL when no SPS has been decoded yet.
 */
OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetSeqInfo(AVCHandle *avcHandle, AVCDecSPSInfo *seqInfo)
{
    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    int PicWidthInMbs, PicHeightInMapUnits, FrameHeightInMbs;

    if (decvid == NULL || decvid->lastSPS == NULL)
    {
        return AVCDEC_FAIL;
    }

    video = decvid->common;  /* not used below; kept as-is */

    /* Dimensions in macroblock units per the SPS; one MB = 16 pixels. */
    PicWidthInMbs = decvid->lastSPS->pic_width_in_mbs_minus1 + 1;
    PicHeightInMapUnits = decvid->lastSPS->pic_height_in_map_units_minus1 + 1 ;
    FrameHeightInMbs = (2 - decvid->lastSPS->frame_mbs_only_flag) * PicHeightInMapUnits ;

    seqInfo->FrameWidth = PicWidthInMbs << 4;
    seqInfo->FrameHeight = FrameHeightInMbs << 4;

    seqInfo->frame_only_flag = decvid->lastSPS->frame_mbs_only_flag;

    if (decvid->lastSPS->frame_cropping_flag)
    {
        /* Crop offsets are in 2-pixel (frame) or 4-pixel (field) luma units;
           right/bottom are converted to last-pixel coordinates. */
        seqInfo->frame_crop_left = 2 * decvid->lastSPS->frame_crop_left_offset;
        seqInfo->frame_crop_right = seqInfo->FrameWidth - (2 * decvid->lastSPS->frame_crop_right_offset + 1);

        if (seqInfo->frame_only_flag)
        {
            seqInfo->frame_crop_top = 2 * decvid->lastSPS->frame_crop_top_offset;
            seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (2 * decvid->lastSPS->frame_crop_bottom_offset + 1);
            /* Note in 7.4.2.1, there is a constraint on the value of frame_crop_left and frame_crop_top
            such that they have to be less than or equal to frame_crop_right/2 and frame_crop_bottom/2, respectively. */
        }
        else
        {
            seqInfo->frame_crop_top = 4 * decvid->lastSPS->frame_crop_top_offset;
            seqInfo->frame_crop_bottom = seqInfo->FrameHeight - (4 * decvid->lastSPS->frame_crop_bottom_offset + 1);
            /* Note in 7.4.2.1, there is a constraint on the value of frame_crop_left and frame_crop_top
            such that they have to be less than or equal to frame_crop_right/2 and frame_crop_bottom/4, respectively. */
        }
    }
    else  /* no cropping flag, just give the first and last pixel */
    {
        seqInfo->frame_crop_bottom = seqInfo->FrameHeight - 1;
        seqInfo->frame_crop_right = seqInfo->FrameWidth - 1;
        seqInfo->frame_crop_top = seqInfo->frame_crop_left = 0;
    }

    /* DPB size derived from the level's MaxDPB table, capped at MAX_FS. */
    seqInfo->num_frames = (uint32)(MaxDPBX2[(uint32)mapLev2Idx[decvid->lastSPS->level_idc]] << 2) / (3 * PicWidthInMbs * PicHeightInMapUnits) + 1;

    if (seqInfo->num_frames  >= MAX_FS)
    {
        seqInfo->num_frames  = MAX_FS;
    }

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecPicParamSet()                                        */
/*  Date     : 11/4/2003                                                    */
/*  Purpose  : Initialize picture                                           */
/*             create reference picture list.                               */
/*  In/out   :                                                              */
/*  Return   : AVCDEC_SUCCESS if succeed, AVC_FAIL if fail.                 */
/*  Modified :                                                              */
/* ======================================================================== */
/**
Since PPS doesn't contain much data, most of the picture initialization will
be done after decoding the slice header in PVAVCDecodeSlice. 
*/\nOSCL_EXPORT_REF AVCDec_Status   PVAVCDecPicParamSet(AVCHandle *avcHandle, uint8 *nal_unit,\n        int nal_size)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecPicParamSet\");\n\n    AVCDec_Status status;\n    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;\n    AVCCommonObj *video;\n    AVCDecBitstream *bitstream;\n\n    if (decvid == NULL)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecPicParamSet return: AVCDEC_FAIL\");\n\n        return AVCDEC_FAIL;\n    }\n\n    video = decvid->common;\n    bitstream = decvid->bitstream;\n    /* 1. Convert EBSP to RBSP. Create bitstream structure */\n    video->forbidden_bit = nal_unit[0] >> 7;\n    video->nal_ref_idc = (nal_unit[0] & 0x60) >> 5;\n    video->nal_unit_type = (AVCNalUnitType)(nal_unit[0] & 0x1F);\n\n    if (video->nal_unit_type != AVC_NALTYPE_PPS) /* not a PPS NAL */\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecPicParamSet (nal_unit_type != AVC_NALTYPE_PPS) return: AVCDEC_FAIL\");\n\n        return AVCDEC_FAIL;\n    }\n\n\n    /* 2. Initialize bitstream structure*/\n    BitstreamInit(bitstream, nal_unit + 1, nal_size - 1);\n\n    /* 2. Decode pic_parameter_set_rbsp syntax. 
Allocate video->picParams[i] and assign to currPicParams */\n    status = DecodePPS(decvid, video, bitstream);\n    if (status != AVCDEC_SUCCESS)\n    {    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecPicParamSet (status != AVCDEC_SUCCESS) return: status: %d\", status);\n\n        return status;\n    }\n\n    video->SliceGroupChangeRate = video->currPicParams->slice_group_change_rate_minus1 + 1 ;\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecPicParamSet return: status: AVCDEC_SUCCESS\");\n\n    return AVCDEC_SUCCESS;\n}\n\nOSCL_EXPORT_REF AVCDec_Status   PVAVCDecSEI(AVCHandle *avcHandle, uint8 *nal_unit,\n        int nal_size)\n{\n    OSCL_UNUSED_ARG(avcHandle);\n    OSCL_UNUSED_ARG(nal_unit);\n    OSCL_UNUSED_ARG(nal_size);\n\n    return AVCDEC_SUCCESS;\n}\n/* ======================================================================== */\n/*  Function : PVAVCDecodeSlice()                                           */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Decode one NAL unit.                                         */\n/*  In/out   :                                                              */\n/*  Return   : See enum AVCDec_Status for return values.                    
*/
/*  Modified :                                                              */
/* ======================================================================== */
/**
 * Decode one slice NAL unit (coded slice or IDR slice).
 *
 * Drives the per-slice state machine: NAL header validation, slice header
 * decoding, detection of a new picture (frame_num change), concealment and
 * forced output of an incomplete previous frame, frame_num-gap handling, DPB
 * buffer/picture initialization, reference list setup, and finally slice
 * decoding plus DPB storage when the picture completes.
 *
 * @param avcHandle  decoder handle
 * @param buffer     slice NAL unit, byte 0 being the NAL header
 * @param buf_size   size of buffer in bytes
 * @return AVCDEC_SUCCESS (slice decoded, picture not yet complete),
 *         AVCDEC_PICTURE_READY (picture completed and stored),
 *         AVCDEC_PICTURE_OUTPUT_READY (a concealed/flushed picture must be
 *         fetched with PVAVCDecGetOutput before decoding continues),
 *         AVCDEC_NOT_SUPPORTED, or AVCDEC_FAIL / a decode status on error.
 */
OSCL_EXPORT_REF AVCDec_Status PVAVCDecodeSlice(AVCHandle *avcHandle, uint8 *buffer,
        int buf_size)
{

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice");

    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    AVCDecBitstream *bitstream;
    AVCDec_Status status;

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --1");

    if (decvid == NULL)
    {

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (decvid == NULL) return: AVCDEC_FAIL");

        return AVCDEC_FAIL;
    }


    video = decvid->common;
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --2 video->newSlice: %d", video->newSlice);


    bitstream = decvid->bitstream;

    /* memory_management_control_operation 5 pending: caller must drain output
       frames before any further decoding. */
    if (video->mem_mgr_ctrl_eq_5)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (video->mem_mgr_ctrl_eq_5) return: AVCDEC_PICTURE_OUTPUT_READY");

        return AVCDEC_PICTURE_OUTPUT_READY;      // to flushout frame buffers
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --3");

    if (video->newSlice)
    {
        /* 2. Check NAL type  */
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice 2. Check NAL type");

        if (buffer == NULL)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (buffer == NULL) return: AVCDEC_FAIL");

            return AVCDEC_FAIL;
        }
        video->prev_nal_unit_type = video->nal_unit_type;
        video->forbidden_bit = buffer[0] >> 7;
        video->nal_ref_idc = (buffer[0] & 0x60) >> 5;
        video->nal_unit_type = (AVCNalUnitType)(buffer[0] & 0x1F);

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice nal_unit_type =%d", video->nal_unit_type);

        /* Access unit delimiters carry no slice data; accept and ignore. */
        if (video->nal_unit_type == AVC_NALTYPE_AUD)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (nal_unit_type == AVC_NALTYPE_AUD) return: AVCDEC_SUCCESS");

            return AVCDEC_SUCCESS;
        }

        /* Only plain coded slices and IDR slices are decodable here. */
        if (video->nal_unit_type != AVC_NALTYPE_SLICE &&
                video->nal_unit_type != AVC_NALTYPE_IDR)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (nal_unit_type != AVC_NALTYPE_IDR) return: AVCDEC_NOT_SUPPORTED");

            return AVCDEC_NOT_SUPPORTED; /* not supported */
        }



        /* NAL types 2..4 are slice data partitions A/B/C — not supported. */
        if (video->nal_unit_type >= 2 && video->nal_unit_type <= 4)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (video->nal_unit_type >= 2 && <= 4) return: AVCDEC_NOT_SUPPORTED");

            return AVCDEC_NOT_SUPPORTED; /* not supported */
        }
        else
        {
            video->slice_data_partitioning = FALSE;
        }

        video->newSlice = FALSE;
        /*  Initialize bitstream structure*/
        BitstreamInit(bitstream, buffer + 1, buf_size - 1);


        /* 2.1 Decode Slice Header (separate function)*/
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice 2.1 Decode Slice Header");

        status = DecodeSliceHeader(decvid, video, bitstream);
        if (status != AVCDEC_SUCCESS)
        {
            video->newSlice = TRUE;

            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (status != AVCDEC_SUCCESS) return: status: %d", status);

            return status;
        }

        /* A changed frame_num (or a first_mb going backwards under
           constrained_set1) means a new picture started; if the previous one
           is incomplete, conceal its missing MBs and force it out. */
        if (video->sliceHdr->frame_num != video->prevFrameNum || (video->sliceHdr->first_mb_in_slice < (uint)video->mbNum && video->currSeqParams->constrained_set1_flag == 1))
        {
            video->newPic = TRUE;
            if (video->numMBs > 0)
            {
                // Conceal missing MBs of previously decoded frame
                ConcealSlice(decvid, video->PicSizeInMbs - video->numMBs, video->PicSizeInMbs);  // Conceal
                video->numMBs = 0;

                //              DeblockPicture(video);   // No need to deblock

                /* 3.2 Decoded frame reference marking. */
                /* 3.3 Put the decoded picture in output buffers */
                /* set video->mem_mge_ctrl_eq_5 */
                /* Store under the PREVIOUS NAL type — the stored picture
                   belongs to the previous access unit. */
                AVCNalUnitType temp = video->nal_unit_type;
                video->nal_unit_type = video->prev_nal_unit_type;
                StorePictureInDPB(avcHandle, video);
                video->nal_unit_type = temp;
                video->mbNum = 0; // MC_Conceal

                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (video->numMBs > 0) return: AVCDEC_PICTURE_OUTPUT_READY");

                return AVCDEC_PICTURE_OUTPUT_READY;
            }
        }

        /* IDR resets the frame_num bookkeeping. */
        if (video->nal_unit_type == AVC_NALTYPE_IDR)
        {
            video->prevFrameNum = 0;
            video->PrevRefFrameNum = 0;
        }

        if (!video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
        {   /* no gaps allowed, frame_num has to increase by one only */
            /*          if(sliceHdr->frame_num != (video->PrevRefFrameNum + 1)%video->MaxFrameNum) */
            if (video->sliceHdr->frame_num != video->PrevRefFrameNum && video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
            {
                /* Unexpected frame_num jump: synthesize and conceal one whole
                   frame for the missing frame_num, store it, and ask the
                   caller to drain output. */
                // Conceal missing MBs of previously decoded frame
                video->numMBs = 0;
                video->newPic = TRUE;
                video->prevFrameNum++; // FIX
                video->PrevRefFrameNum++;
                AVCNalUnitType temp = video->nal_unit_type;
                video->nal_unit_type = AVC_NALTYPE_SLICE; //video->prev_nal_unit_type;
                status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice gaps_in_frame_num_value_allowed_flag DPBInitBuffer (status != AVCDEC_SUCCESS) return: status: %d", status);

                    return status;
                }
                video->currFS->IsOutputted = 0x01;
                video->currFS->IsReference = 3;
                video->currFS->IsLongTerm = 0;

                DecodePOC(video);
                /* find an empty memory from DPB and assigned to currPic */
                DPBInitPic(video, video->PrevRefFrameNum % video->MaxFrameNum);
                RefListInit(video);
                ConcealSlice(decvid, 0, video->PicSizeInMbs);  // Conceal
                video->currFS->IsOutputted |= 0x02;
                //conceal frame
                /* 3.2 Decoded frame reference marking. */
                /* 3.3 Put the decoded picture in output buffers */
                /* set video->mem_mge_ctrl_eq_5 */
                video->mbNum = 0; // Conceal
                StorePictureInDPB(avcHandle, video);
                video->nal_unit_type = temp;

                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice gaps_in_frame_num_value_allowed_flag return: AVCDEC_PICTURE_OUTPUT_READY");

                return AVCDEC_PICTURE_OUTPUT_READY;
            }
        }
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --4");


    if (video->newPic == TRUE)
    {
        status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
        if (status != AVCDEC_SUCCESS)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice DPBInitBuffer->(newPic == TRUE) return: status: %d", status);

            return status;
        }
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --5");

    video->newSlice = TRUE;

    /* function pointer setting at slice-level */
    // OPTIMIZE
    decvid->residual_block = &residual_block_cavlc;

    /* derive picture order count */
    if (video->newPic == TRUE)
    {

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --5.1");

        video->numMBs = video->PicSizeInMbs;

        /* Gaps allowed by the SPS: fill missing frame_num values with
           imaginary frames before initializing this picture. */
        if (video->nal_unit_type != AVC_NALTYPE_IDR && video->currSeqParams->gaps_in_frame_num_value_allowed_flag)
        {
            if (video->sliceHdr->frame_num != (video->PrevRefFrameNum + 1) % video->MaxFrameNum)
            {
                status = fill_frame_num_gap(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    video->numMBs = 0;

                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice fill_frame_num_gap->(status != AVCDEC_SUCCESS) return: status: %d", status);

                    return status;
                }

                status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);
                if (status != AVCDEC_SUCCESS)
                {
                    video->numMBs = 0;

                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice DPBInitBuffer->(status != AVCDEC_SUCCESS) return: status: %d", status);

                    return status;
                }


            }
        }
        /* if there's gap in the frame_num, we have to fill in the gap with
            imaginary frames that won't get used for short-term ref. */
        /* see fill_frame_num_gap() in JM */


        DecodePOC(video);
        /* find an empty memory from DPB and assigned to currPic */
        DPBInitPic(video, video->CurrPicNum);

        video->currPic->isReference = TRUE;  // FIX

        if (video->nal_ref_idc == 0)
        {
            video->currPic->isReference = FALSE;
            video->currFS->IsOutputted |= 0x02;     /* The MASK 0x02 means not needed for reference, or returned */
            /* no need to check for freeing of this buffer */
        }

        FMOInit(video);

        if (video->currPic->isReference)
        {
            video->PrevRefFrameNum = video->sliceHdr->frame_num;
        }


        video->prevFrameNum = video->sliceHdr->frame_num;
    }


    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice --6");

    video->newPic = FALSE;


    /* Initialize refListIdx for this picture */
    RefListInit(video);

    /* Re-order the reference list according to the ref_pic_list_reordering() */
    status = (AVCDec_Status)ReOrderList(video);
    if (status != AVCDEC_SUCCESS)
    {

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice ReOrderList->(status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");

        return AVCDEC_FAIL;
    }

    /* 2.2 Decode Slice. */
    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice 2.2 Decode Slice");

    status = (AVCDec_Status)DecodeSlice(decvid);

    video->slice_id++;  //  slice

    if (status == AVCDEC_PICTURE_READY)
    {
        /* 3. Check complete picture */
#ifndef MB_BASED_DEBLOCK
        /* 3.1 Deblock */
        DeblockPicture(video);
#endif
        /* 3.2 Decoded frame reference marking. */
        /* 3.3 Put the decoded picture in output buffers */
        /* set video->mem_mge_ctrl_eq_5 */

        // xxx pa call to AVC_FRAME_UNBIND
        status = (AVCDec_Status)StorePictureInDPB(avcHandle, video);          // CHECK check the return status

        if (status != AVCDEC_SUCCESS)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice StorePictureInDPB->(status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");

            return AVCDEC_FAIL;
        }

        /* Update POC prediction state: mmco 5 resets it, otherwise carry the
           current picture's values forward. */
        if (video->mem_mgr_ctrl_eq_5)
        {
            video->PrevRefFrameNum = 0;
            video->prevFrameNum = 0;
            video->prevPicOrderCntMsb = 0;
            video->prevPicOrderCntLsb = video->TopFieldOrderCnt;
            video->prevFrameNumOffset = 0;
        }
        else
        {
            video->prevPicOrderCntMsb = video->PicOrderCntMsb;
            video->prevPicOrderCntLsb = video->sliceHdr->pic_order_cnt_lsb;
            video->prevFrameNumOffset = video->FrameNumOffset;
        }

        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (status == AVCDEC_PICTURE_READY) return: status: AVCDEC_PICTURE_READY");

        return AVCDEC_PICTURE_READY;
    }
    else if (status != AVCDEC_SUCCESS)
    {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice (status != AVCDEC_SUCCESS) return: status: AVCDEC_FAIL");

        return AVCDEC_FAIL;
    }

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecodeSlice final return: status: AVCDEC_SUCCESS");

    return AVCDEC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCDecGetOutput()                                          */
/*  Date     : 11/3/2003                                                    */
/*  Purpose  : Get the next picture according to PicOrderCnt.               */
/*  In/out   :                                                              */
/*  Return   : AVCFrameIO structure                                         */
/*  Modified :                                                              */
/* ======================================================================== */

/**
 * Return the displayable picture with the smallest PicOrderCnt from the DPB.
 *
 * @param avcHandle  decoder handle
 * @param indx       out: DPB frame-store index of the returned picture
 * @param release    out: 1 when the caller may release the buffer after use
 *                   (picture outputted AND no longer needed for reference)
 * @param output     out: filled with Y/Cb/Cr pointers, dimensions and order
 *                   counts of the returned picture
 * @return AVCDEC_SUCCESS, or AVCDEC_FAIL when nothing can be output (in the
 *         no-candidate case a short-term reference frame may be evicted as a
 *         side effect so decoding can continue).
 */
OSCL_EXPORT_REF AVCDec_Status PVAVCDecGetOutput(AVCHandle *avcHandle, int *indx, int *release, AVCFrameIO *output)
{

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCDecGetOutput");

    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    AVCDecPicBuffer *dpb;
    AVCFrameStore *oldestFrame = NULL;
    int i, first = 1;
    int count_frame = 0;
    int index = 0;
    int min_poc = 0;

    if (decvid == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "(decvid == NULL) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    video = decvid->common;
    dpb = video->decPicBuf;

    if (dpb->num_fs == 0)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "(num_fs == 0) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* search for the oldest frame_num in dpb */
    /* extension to field decoding, we have to search for every top_field/bottom_field within
    each frame in the dpb. This code only works for frame based.*/

    if (video->mem_mgr_ctrl_eq_5 == FALSE)
    {
        /* Normal case: pick the not-yet-outputted frame with the minimum POC. */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsOutputted & 0x01) == 0)
            {
                count_frame++;
                if (first)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    first = 0;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
                if (dpb->fs[i]->PicOrderCnt < min_poc)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
            }
        }
    }
    else
    {
        /* mmco-5 flush: same minimum-POC search but exclude the current frame
           store, which belongs to the picture that triggered the flush. */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if ((dpb->fs[i]->IsOutputted & 0x01) == 0 && dpb->fs[i] != video->currFS)
            {
                count_frame++;
                if (first)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    first = 0;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
                if (dpb->fs[i]->PicOrderCnt < min_poc)
                {
                    min_poc = dpb->fs[i]->PicOrderCnt;
                    oldestFrame = dpb->fs[i];
                    index = i;
                }
            }
        }

        if (count_frame < 2 && video->nal_unit_type != AVC_NALTYPE_IDR)
        {
            video->mem_mgr_ctrl_eq_5 = FALSE;  // FIX
        }
        else if (count_frame < 1 && video->nal_unit_type == AVC_NALTYPE_IDR)
        {
            for (i = 0; i < dpb->num_fs; i++)
            {
                if (dpb->fs[i] == video->currFS && (dpb->fs[i]->IsOutputted & 0x01) == 0)
                {
                    oldestFrame = dpb->fs[i];
                    index = i;
                    break;
                }
            }
            video->mem_mgr_ctrl_eq_5 = FALSE;
        }
    }

    if (oldestFrame == NULL)
    {

        /*      Check for Mem_mgmt_operation_5 based forced output */
        for (i = 0; i < dpb->num_fs; i++)
        {
            /* looking for the one not used or not reference and has been outputted */
            if (dpb->fs[i]->IsReference == 0 && dpb->fs[i]->IsOutputted == 3)
            {
                break;
            }
        }
        if (i < dpb->num_fs)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "(i < dpb->num_fs) return AVCDEC_FAIL");
            /* there are frames available for decoding */
            return AVCDEC_FAIL; /* no frame to be outputted */
        }


        /* no free frame available, we have to release one to continue decoding */
        int MinIdx = 0;
        int32 MinFrameNumWrap = 0x7FFFFFFF;

        /* Evict the short-term reference frame with the smallest FrameNumWrap
           (the oldest one) to free a slot. */
        for (i = 0; i < dpb->num_fs; i++)
        {
            if (dpb->fs[i]->IsReference && !dpb->fs[i]->IsLongTerm)
            {
                if (dpb->fs[i]->FrameNumWrap < MinFrameNumWrap)
                {
                    MinFrameNumWrap = dpb->fs[i]->FrameNumWrap;
                    MinIdx = i;
                }
            }
        }
        /* mark the frame with smallest PicOrderCnt to be unused for reference */
        dpb->fs[MinIdx]->IsReference = 0;
        dpb->fs[MinIdx]->IsLongTerm = 0;
        dpb->fs[MinIdx]->frame.isReference = FALSE;
        dpb->fs[MinIdx]->frame.isLongTerm = FALSE;
        dpb->fs[MinIdx]->IsOutputted |= 0x02;
#ifdef PV_MEMORY_POOL
        if (dpb->fs[MinIdx]->IsOutputted == 3)
        {
            avcHandle->CBAVC_FrameUnbind(avcHandle->userData, MinIdx);
        }
#endif
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "(oldestFrame == NULL ?!?) return AVCDEC_FAIL");
        return AVCDEC_FAIL;
    }

    /* MASK 0x01 means the frame is outputted (for display). A frame gets freed when it is
    outputted (0x01) and not needed for reference (0x02)   */
    oldestFrame->IsOutputted |= 0x01;

    if (oldestFrame->IsOutputted == 3)
    {
        *release = 1; /* flag to release the buffer */
    }
    else
    {
        *release = 0;
    }
    /* do not release buffer here, release it after it is sent to the sink node */

    output->YCbCr[0] = oldestFrame->frame.Sl;
    output->YCbCr[1] = oldestFrame->frame.Scb;
    output->YCbCr[2] = oldestFrame->frame.Scr;
    output->height = oldestFrame->frame.height;
    output->pitch = oldestFrame->frame.width;
    output->disp_order = oldestFrame->PicOrderCnt;
    output->coding_order = oldestFrame->FrameNum;
    output->id = (uint32) oldestFrame->base_dpb; /* use the pointer as the id */
    *indx = index;


    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "final return AVCDEC_SUCCESS");
    return AVCDEC_SUCCESS;
}


/* ======================================================================== */
/*  Function : PVAVCDecReset()                                              */
/*  Date     : 03/04/2004                                                   */
/*  Purpose  : Reset decoder, prepare it for a new IDR frame.
*/\n/*  In/out   :                                                              */\n/*  Return   :  void                                                        */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF void    PVAVCDecReset(AVCHandle *avcHandle)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecReset\");\n\n    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;\n    AVCCommonObj *video;\n    AVCDecPicBuffer *dpb;\n    int i;\n\n    if (decvid == NULL)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecReset decvid == NULL\");\n\n        return;\n    }\n\n    video = decvid->common;\n    \n    dpb = video->decPicBuf;\n\n\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecReset --1 dpb->num_fs: %d\", dpb->num_fs);\n\n    /* reset the DPB */\n    for (i = 0; i < dpb->num_fs; i++)\n    {\n        dpb->fs[i]->IsLongTerm = 0;\n        dpb->fs[i]->IsReference = 0;\n        dpb->fs[i]->IsOutputted = 3;\n        dpb->fs[i]->frame.isReference = 0;\n        dpb->fs[i]->frame.isLongTerm = 0;\n        \n// xxx pa like dpb:StorePictureInDPB try to hold iFrameUsed structure in sync!\n#ifdef PV_MEMORY_POOL\n        avcHandle->CBAVC_FrameUnbind(avcHandle->userData, i);\n#endif\n\n    }\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCDecReset --2\");\n\n    video->mem_mgr_ctrl_eq_5 = FALSE;\n    video->newPic = TRUE;\n    video->newSlice = TRUE;\n    video->currPic = NULL;\n    video->currFS = NULL;\n    video->prevRefPic = NULL;\n    video->prevFrameNum = 0;\n    video->PrevRefFrameNum = 0;\n    video->prevFrameNumOffset = 0;\n    video->FrameNumOffset = 0;\n    video->mbNum = 0;\n    video->numMBs = 0;\n\n    return ;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVAVCCleanUpDecoder()                               
         */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Clean up the decoder, free all memories allocated.           */\n/*  In/out   :                                                              */\n/*  Return   :  void                                                        */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF void PVAVCCleanUpDecoder(AVCHandle *avcHandle)\n{\n    AVCDecObject *decvid = (AVCDecObject*) avcHandle->AVCObject;\n    AVCCommonObj *video;\n    void *userData = avcHandle->userData;\n    int i;\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"PVAVCCleanUpDecoder\", -1, -1);\n\n    if (decvid != NULL)\n    {\n        video = decvid->common;\n        if (video != NULL)\n        {\n            if (video->MbToSliceGroupMap != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap);\n            }\n\n#ifdef MB_BASED_DEBLOCK\n            if (video->intra_pred_top != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top);\n            }\n            if (video->intra_pred_top_cb != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cb);\n            }\n            if (video->intra_pred_top_cr != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)video->intra_pred_top_cr);\n            }\n#endif\n            if (video->mblock != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)video->mblock);\n            }\n\n            if (video->decPicBuf != NULL)\n            {\n                CleanUpDPB(avcHandle, video);\n                avcHandle->CBAVC_Free(userData, (int)video->decPicBuf);\n            }\n\n            if (video->sliceHdr != NULL)\n            {\n                
avcHandle->CBAVC_Free(userData, (int)video->sliceHdr);\n            }\n\n            avcHandle->CBAVC_Free(userData, (int)video); /* last thing to do */\n\n        }\n\n        for (i = 0; i < 256; i++)\n        {\n            if (decvid->picParams[i] != NULL)\n            {\n                if (decvid->picParams[i]->slice_group_id != NULL)\n                {\n                    avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]->slice_group_id);\n                }\n                avcHandle->CBAVC_Free(userData, (int)decvid->picParams[i]);\n            }\n        }\n        for (i = 0; i < 32; i++)\n        {\n            if (decvid->seqParams[i] != NULL)\n            {\n                avcHandle->CBAVC_Free(userData, (int)decvid->seqParams[i]);\n            }\n        }\n        if (decvid->bitstream != NULL)\n        {\n            avcHandle->CBAVC_Free(userData, (int)decvid->bitstream);\n        }\n\n\n        avcHandle->CBAVC_Free(userData, (int)decvid);\n    }\n\n\n    return ;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/avcdec_bitstream.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains bitstream related functions.\n@publishedAll\n*/\n\n#ifndef _AVCDEC_BITSTREAM_H_\n#define _AVCDEC_BITSTREAM_H_\n\n#include \"avcdec_lib.h\"\n\n#define WORD_SIZE   32  /* this can vary, default to 32 bit for now */\n\n#ifndef __cplusplus\n\n#define AVC_GETDATA(x,y)   userData->AVC_GetData(x,y)\n\n#endif\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n#define BitstreamFlushBits(A,B)     {(A)->bitcnt += (B); (A)->incnt -= (B); (A)->curr_word <<= (B);}\n\n    AVCDec_Status AVC_BitstreamFillCache(AVCDecBitstream *stream);\n    /**\n    This function populates bitstream structure.\n    \\param \"stream\" \"Pointer to bitstream structure.\"\n    \\param \"buffer\" \"Pointer to the bitstream buffer.\"\n    \\param \"size\"   \"Size of the buffer.\"\n    \\param \"nal_size\"   \"Size of the NAL unit.\"\n    \\param \"resetall\"   \"Flag for reset everything.\"\n    \\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL for fail.\"\n    */\n    AVCDec_Status BitstreamInit(AVCDecBitstream *stream, uint8 *buffer, int size);\n\n    /**\n    This function reads next aligned word and remove the emulation prevention code\n    if necessary.\n    \\param \"stream\" \"Pointer to bitstream 
structure.\"\n    \\return \"Next word.\"\n    */\n    uint BitstreamNextWord(AVCDecBitstream *stream);\n\n    /**\n    This function reads nBits bits from the current position and advance the pointer.\n    \\param \"stream\" \"Pointer to bitstream structure.\"\n    \\param \"nBits\" \"Number of bits to be read.\"\n    \\param \"code\"   \"Point to the read value.\"\n    \\return \"AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits\n                is greater than the word-size, AVCDEC_PACKET_LOSS or\n                AVCDEC_NO_DATA if callback to get data fails.\"\n    */\n    AVCDec_Status BitstreamReadBits(AVCDecBitstream *stream, int nBits, uint *code);\n\n    /**\n    This function shows nBits bits from the current position without advancing the pointer.\n    \\param \"stream\" \"Pointer to bitstream structure.\"\n    \\param \"nBits\" \"Number of bits to be read.\"\n    \\param \"code\"   \"Point to the read value.\"\n    \\return \"AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits\n                    is greater than the word-size, AVCDEC_NO_DATA if it needs\n                    to callback to get data.\"\n    */\n    AVCDec_Status BitstreamShowBits(AVCDecBitstream *stream, int nBits, uint *code);\n\n\n    /**\n    This function flushes nBits bits from the current position.\n    \\param \"stream\" \"Pointer to bitstream structure.\"\n    \\param \"nBits\" \"Number of bits to be read.\"\n    \\return \"AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits\n                    is greater than the word-size It will not call back to get\n                   more data. 
Users should call BitstreamShowBits to determine\n                   how much they want to flush.\"\n    */\n\n    /**\n    This function read 1 bit from the current position and advance the pointer.\n    \\param \"stream\" \"Pointer to bitstream structure.\"\n    \\param \"nBits\" \"Number of bits to be read.\"\n    \\param \"code\"   \"Point to the read value.\"\n    \\return \"AVCDEC_SUCCESS if successed, AVCDEC_FAIL if number of bits\n                is greater than the word-size, AVCDEC_PACKET_LOSS or\n                AVCDEC_NO_DATA if callback to get data fails.\"\n    */\n    AVCDec_Status BitstreamRead1Bit(AVCDecBitstream *stream, uint *code);\n\n    /**\n    This function checks whether the current bit position is byte-aligned or not.\n    \\param \"stream\" \"Pointer to the bitstream structure.\"\n    \\return \"TRUE if byte-aligned, FALSE otherwise.\"\n    */\n    bool byte_aligned(AVCDecBitstream *stream);\n    AVCDec_Status BitstreamByteAlign(AVCDecBitstream  *stream);\n    /**\n    This function checks whether there are more RBSP data before the trailing bits.\n    \\param \"stream\" \"Pointer to the bitstream structure.\"\n    \\return \"TRUE if yes, FALSE otherwise.\"\n    */\n    bool more_rbsp_data(AVCDecBitstream *stream);\n\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus  */\n\n#endif /* _AVCDEC_BITSTREAM_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/avcdec_int.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains application function interfaces to the AVC decoder library\nand necessary type defitionitions and enumerations.\nNaming convention for variables:\nlower_case_with_under_line  is  syntax element in subclause 7.2 and 7.3\nnoUnderLine or NoUnderLine  is  derived variables defined somewhere else in the draft\n                                or introduced by this decoder library.\n@publishedAll\n*/\n\n#ifndef _AVCDEC_INT_H_\n#define _AVCDEC_INT_H_\n\n#include \"avcint_common.h\"\n#include \"avcdec_api.h\"\n\n\n/**\nBitstream structure contains bitstream related parameters such as the pointer\nto the buffer, the current byte position and bit position.\n@publishedAll\n*/\ntypedef struct tagDecBitstream\n{\n    uint8 *bitstreamBuffer; /* pointer to buffer memory   */\n    int nal_size;       /* size of the current NAL unit */\n    int data_end_pos;  /* bitstreamBuffer size in bytes */\n    int read_pos;       /* next position to read from bitstreamBuffer  */\n    uint curr_word; /* byte-swapped (MSB left) current word read from buffer */\n    int bit_left;      /* number of bit left in current_word */\n    uint next_word;     /* in case for old data in previous buffer hasn't been 
flushed. */\n    int incnt;  /* bit left in the prev_word */\n    int incnt_next;\n    int bitcnt;\n    void *userData;\n} AVCDecBitstream;\n\n/**\nThis structure is the main object for AVC decoder library providing access to all\nglobal variables. It is allocated at PVAVCInitDecoder and freed at PVAVCCleanUpDecoder.\n@publishedAll\n*/\ntypedef struct tagDecObject\n{\n\n    AVCCommonObj *common;\n\n    AVCDecBitstream     *bitstream; /* for current NAL */\n\n    /* sequence parameter set */\n    AVCSeqParamSet *seqParams[32]; /* Array of pointers, get allocated at arrival of new seq_id */\n\n    AVCSeqParamSet *lastSPS; /* point to the most recently decoded SPS, for PVAVCDecGetSeqInfo */\n\n    /* picture parameter set */\n    AVCPicParamSet *picParams[256]; /* Array of pointers to picture param set structures */\n\n    /* For internal operation, scratch memory for MV, prediction, transform, etc.*/\n    uint    ref_idx_l0[4]; /* [mbPartIdx], te(v) */\n    uint    ref_idx_l1[4];\n\n    /* function pointers */\n    AVCDec_Status(*residual_block)(struct tagDecObject*, int,  int,\n                                   int *, int *, int *);\n    /* Application control data */\n    AVCHandle *avcHandle;\n    void (*AVC_DebugLog)(AVCLogType type, char *string1, char *string2);\n    /*bool*/\n    uint    debugEnable;\n\n} AVCDecObject;\n\n#endif /* _AVCDEC_INT_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/avcdec_lib.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains declarations of internal functions for AVC decoder library.\n@publishedAll\n*/\n#ifndef _AVCDEC_LIB_H_\n#define _AVCDEC_LIB_H_\n\n#include \"avclib_common.h\"\n#include \"avcdec_int.h\"\n\n/*----------- avcdec_api.c -------------*/\n/**\nThis function takes out the emulation prevention bytes from the input to creat RBSP.\nThe result is written over the input bitstream.\n\\param \"nal_unit\"   \"(I/O) Pointer to the input buffer.\"\n\\param \"size\"       \"(I/O) Pointer to the size of the input/output buffer.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status EBSPtoRBSP(uint8 *nal_unit, int *size);\n\n/*------------- pred_intra.c ---------------*/\n/**\nThis function is the main entry point to intra prediction operation on a\nmacroblock.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n*/\nAVCStatus  IntraMBPrediction(AVCCommonObj *video);\n\nvoid SaveNeighborForIntraPred(AVCCommonObj *video, int offset);\n\nAVCStatus Intra_4x4(AVCCommonObj *video, int component, int SubBlock_indx, uint8 *comp);\nvoid Intra_4x4_Vertical(AVCCommonObj *video, int block_offset);\nvoid Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int 
block_offset);\nvoid Intra_4x4_DC(AVCCommonObj *video, int pitch, int block_offset, AVCNeighborAvailability *availability);\nvoid Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability);\nvoid Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int block_offset);\nvoid Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset);\nvoid Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch, int block_offset);\nvoid Intra_4x4_Vertical_Left(AVCCommonObj *video,  int block_offset, AVCNeighborAvailability *availability);\nvoid Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset);\nvoid  Intra_16x16_Vertical(AVCCommonObj *video);\nvoid Intra_16x16_Horizontal(AVCCommonObj *video, int pitch);\nvoid Intra_16x16_DC(AVCCommonObj *video, int pitch);\nvoid Intra_16x16_Plane(AVCCommonObj *video, int pitch);\nvoid Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);\nvoid  Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);\nvoid  Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr);\nvoid  Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr);\n\n/*------------ pred_inter.c ---------------*/\n/**\nThis function is the main entrance to inter prediction operation for\na macroblock. 
For decoding, this function also calls inverse transform and\ncompensation.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"void\"\n*/\nvoid InterMBPrediction(AVCCommonObj *video);\n\n/**\nThis function is called for luma motion compensation.\n\\param \"ref\"    \"Pointer to the origin of a reference luma.\"\n\\param \"picwidth\"   \"Width of the picture.\"\n\\param \"picheight\"  \"Height of the picture.\"\n\\param \"x_pos\"  \"X-coordinate of the predicted block in quarter pel resolution.\"\n\\param \"y_pos\"  \"Y-coordinate of the predicted block in quarter pel resolution.\"\n\\param \"pred\"   \"Pointer to the output predicted block.\"\n\\param \"pred_pitch\" \"Width of pred.\"\n\\param \"blkwidth\"   \"Width of the current partition.\"\n\\param \"blkheight\"  \"Height of the current partition.\"\n\\return \"void\"\n*/\nvoid LumaMotionComp(uint8 *ref, int picwidth, int picheight,\n                    int x_pos, int y_pos,\n                    uint8 *pred, int pred_pitch,\n                    int blkwidth, int blkheight);\n\n/**\nFunctions below are special cases for luma motion compensation.\nLumaFullPelMC is for full pixel motion compensation.\nLumaBorderMC is for interpolation in only one dimension.\nLumaCrossMC is for interpolation in one dimension and half point in the other dimension.\nLumaDiagonalMC is for interpolation in diagonal direction.\n\n\\param \"ref\"    \"Pointer to the origin of a reference luma.\"\n\\param \"picwidth\"   \"Width of the picture.\"\n\\param \"picheight\"  \"Height of the picture.\"\n\\param \"x_pos\"  \"X-coordinate of the predicted block in full pel resolution.\"\n\\param \"y_pos\"  \"Y-coordinate of the predicted block in full pel resolution.\"\n\\param \"dx\"     \"Fraction of x_pos in quarter pel.\"\n\\param \"dy\"     \"Fraction of y_pos in quarter pel.\"\n\\param \"curr\"   \"Pointer to the current partition in the current picture.\"\n\\param \"residue\"    \"Pointer to the current partition for the residue 
block.\"\n\\param \"blkwidth\"   \"Width of the current partition.\"\n\\param \"blkheight\"  \"Height of the current partition.\"\n\\return \"void\"\n*/\nvoid CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos,\n               uint8 *out, int blkwidth, int blkheight);\n\nvoid FullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch,\n               int blkwidth, int blkheight);\n\nvoid HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dx);\n\nvoid HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dx);\n\nvoid HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,\n                   int blkwidth, int blkheight);\n\nvoid VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dy);\n\nvoid VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,\n                   int blkwidth, int blkheight);\n\nvoid VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dy);\n\nvoid DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,\n                      uint8 *out, int outpitch,\n                      int blkwidth, int blkheight);\n\n\nvoid ChromaMotionComp(uint8 *ref, int picwidth, int picheight,\n                      int x_pos, int y_pos, uint8 *pred, int pred_pitch,\n                      int blkwidth, int blkheight);\n\nvoid ChromaFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                     int blkwidth, int blkheight) ;\nvoid ChromaBorderMC(uint8 *ref, int picwidth, int dx, int dy,\n                    uint8 *pred, int pred_pitch, int blkwidth, int blkheight);\nvoid ChromaDiagonalMC(uint8 *ref, int picwidth, int dx, int dy,\n                      uint8 *pred, int pred_pitch, int blkwidth, int blkheight);\n\nvoid ChromaFullPelMCOutside(uint8 *ref, uint8 *pred, int 
pred_pitch,\n                            int blkwidth, int blkheight, int x_inc,\n                            int y_inc0, int y_inc1, int x_mid, int y_mid);\nvoid ChromaBorderMCOutside(uint8 *ref, int picwidth, int dx, int dy,\n                           uint8 *pred, int pred_pitch, int blkwidth, int blkheight,\n                           int x_inc, int z_inc, int y_inc0, int y_inc1, int x_mid, int y_mid);\nvoid ChromaDiagonalMCOutside(uint8 *ref, int picwidth,\n                             int dx, int dy, uint8 *pred, int pred_pitch,\n                             int blkwidth, int blkheight, int x_inc, int z_inc,\n                             int y_inc0, int y_inc1, int x_mid, int y_mid);\n\nvoid ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                           uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                             uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                           uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                       uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                              uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\nvoid ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n\n/*----------- slice.c ---------------*/\n/**\nThis function performs the main decoding loop for slice data including\nINTRA/INTER prediction, transform and quantization and 
compensation.\nSee decode_frame_slice() in JM.\n\\param \"video\"  \"Pointer to AVCDecObject.\"\n\\return \"AVCDEC_SUCCESS for success, AVCDEC_PICTURE_READY for end-of-picture and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status DecodeSlice(AVCDecObject *video);\nAVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end);\n/**\nThis function performs the decoding of one macroblock.\n\\param \"video\"  \"Pointer to AVCDecObject.\"\n\\param \"prevMbSkipped\"  \"A value derived in 7.3.4.\"\n\\return \"AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status DecodeMB(AVCDecObject *video);\n\n/**\nThis function performs macroblock prediction type decoding as in subclause 7.3.5.1.\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"currMB\" \"Pointer to the current macroblock.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);\n\n/**\nThis function performs sub-macroblock prediction type decoding as in subclause 7.3.5.2.\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"currMB\" \"Pointer to the current macroblock.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS for success or AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);\n\n/**\nThis function interprets the mb_type and sets necessary information\nwhen the slice type is AVC_I_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n\\param \"mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretMBModeI(AVCMacroblock *mblock, uint mb_type);\n\n/**\nThis function interprets the mb_type and sets necessary information\nwhen the slice type is AVC_P_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current 
AVCMacroblock.\"\n\\param \"mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretMBModeP(AVCMacroblock *mblock, uint mb_type);\n\n/**\nThis function interprets the mb_type and sets necessary information\nwhen the slice type is AVC_B_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n\\param \"mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretMBModeB(AVCMacroblock *mblock, uint mb_type);\n\n/**\nThis function interprets the mb_type and sets necessary information\nwhen the slice type is AVC_SI_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n\\param \"mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type);\n\n/**\nThis function interprets the sub_mb_type and sets necessary information\nwhen the slice type is AVC_P_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n\\param \"sub_mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type);\n\n/**\nThis function interprets the sub_mb_type and sets necessary information\nwhen the slice type is AVC_B_SLICE.\nin the macroblock structure.\n\\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n\\param \"sub_mb_type\" \"From the syntax bitstream.\"\n\\return \"void\"\n*/\nvoid InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type);\n\n/**\nThis function decodes the Intra4x4 prediction mode from neighboring information\nand from the decoded syntax.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"currMB\" \"Pointer to current macroblock.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream);\n\n/*----------- vlc.c 
-------------------*/\n/**\nThis function reads and decodes Exp-Golomb codes.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"codeNum\" \"Pointer to the value of the codeNum.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum);\n\n/**\nThis function reads and decodes signed Exp-Golomb codes.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"value\"  \"Pointer to syntax element value.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status  se_v(AVCDecBitstream *bitstream, int *value);\n\n/**\nThis function reads and decodes signed Exp-Golomb codes for\n32 bit codeword.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"value\"  \"Pointer to syntax element value.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status  se_v32bit(AVCDecBitstream *bitstream, int32 *value);\n\n/**\nThis function reads and decodes truncated Exp-Golomb codes.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"value\"  \"Pointer to syntax element value.\"\n\\param \"range\"  \"Range of the value as input to determine the algorithm.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range);\n\n/**\nThis function parse Exp-Golomb code from the bitstream.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"leadingZeros\" \"Pointer to the number of leading zeros.\"\n\\param \"infobits\"   \"Pointer to the value after leading zeros and the first one.\n                    The total number of bits read is 2*leadingZeros + 1.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status GetEGBitstring(AVCDecBitstream *bitstream, int *leadingZeros, int *infobits);\n\n/**\nThis function parse Exp-Golomb code from the bitstream for 32 bit codewords.\n\\param \"bitstream\" \"Pointer to AVCDecBitstream.\"\n\\param \"leadingZeros\" \"Pointer to the number of leading 
zeros.\"\n\\param \"infobits\"   \"Pointer to the value after leading zeros and the first one.\n                    The total number of bits read is 2*leadingZeros + 1.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits);\n\n/**\nThis function performs CAVLC decoding of the CBP (coded block pattern) of a macroblock\nby calling ue_v() and then mapping the codeNum to the corresponding CBP value.\n\\param \"currMB\"  \"Pointer to the current AVCMacroblock structure.\"\n\\param \"stream\"  \"Pointer to the AVCDecBitstream.\"\n\\return \"void\"\n*/\nAVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream);\n\n/**\nThis function decodes the syntax for trailing ones and total coefficient.\nSubject to optimization.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"TrailingOnes\"   \"Pointer to the trailing one variable output.\"\n\\param \"TotalCoeff\" \"Pointer to the total coefficient variable output.\"\n\\param \"nC\" \"Context for number of nonzero coefficient (prediction context).\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC);\n\n/**\nThis function decodes the syntax for trailing ones and total coefficient for\nchroma DC block. 
Subject to optimization.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"TrailingOnes\"   \"Pointer to the trailing one variable output.\"\n\\param \"TotalCoeff\" \"Pointer to the total coefficient variable output.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff);\n\n/**\nThis function decode a VLC table with 2 output.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"lentab\" \"Table for code length.\"\n\\param \"codtab\" \"Table for code value.\"\n\\param \"tabwidth\" \"Width of the table or alphabet size of the first output.\"\n\\param \"tabheight\"  \"Height of the table or alphabet size of the second output.\"\n\\param \"code1\"  \"Pointer to the first output.\"\n\\param \"code2\"  \"Pointer to the second output.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status code_from_bitstream_2d(AVCDecBitstream *stream, int *lentab, int *codtab, int tabwidth,\n                                     int tabheight, int *code1, int *code2);\n\n/**\nThis function decodes the level_prefix VLC value as in Table 9-6.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"code\"   \"Pointer to the output.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code);\n\n/**\nThis function decodes total_zeros VLC syntax as in Table 9-7 and 9-8.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"code\"   \"Pointer to the output.\"\n\\param \"TotalCoeff\" \"Context parameter.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff);\n\n/**\nThis function decodes total_zeros VLC syntax for chroma DC as in Table 9-9.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"code\"   \"Pointer to the output.\"\n\\param \"TotalCoeff\" \"Context parameter.\"\n\\return 
\"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff);\n\n/**\nThis function decodes run_before VLC syntax as in Table 9-10.\n\\param \"stream\" \"Pointer to the AVCDecBitstream.\"\n\\param \"code\"   \"Pointer to the output.\"\n\\param \"zeroLeft\"   \"Context parameter.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int zeroLeft);\n\n/*----------- header.c -------------------*/\n/**\nThis function parses vui_parameters.\n\\param \"decvid\" \"Pointer to AVCDecObject.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS);\nAVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize);\n\nAVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream);\nAVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream);\nAVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream);\nAVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream);\nAVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, AVCDecBitstream *stream);\n\n\n/**\nThis function parses hrd_parameters.\n\\param \"decvid\" \"Pointer to AVCDecObject.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam);\n\n/**\nThis function decodes the syntax in sequence parameter set slice and fill up the AVCSeqParamSet\nstructure.\n\\param \"decvid\" \"Pointer to AVCDecObject.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status 
DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream);\n\n/**\nThis function decodes the syntax in picture parameter set and fill up the AVCPicParamSet\nstructure.\n\\param \"decvid\" \"Pointer to AVCDecObject.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS or AVCDEC_FAIL.\"\n*/\nAVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream);\nAVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream);\n\n/**\nThis function decodes slice header, calls related functions such as\nreference picture list reordering, prediction weight table, decode ref marking.\nSee FirstPartOfSliceHeader() and RestOfSliceHeader() in JM.\n\\param \"decvid\" \"Pointer to AVCDecObject.\"\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream);\n\n/**\nThis function performes necessary operations to create dummy frames when\nthere is a gap in frame_num.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video);\n\n/**\nThis function decodes ref_pic_list_reordering related syntax and fill up the AVCSliceHeader\nstructure.\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\param \"sliceHdr\" \"Pointer to AVCSliceHdr.\"\n\\param \"slice_type\" \"Value of slice_type - 5 if greater than 5.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type);\n\n/**\nThis function decodes dec_ref_pic_marking related syntax  and fill up the 
AVCSliceHeader\nstructure.\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n\\param \"sliceHdr\" \"Pointer to AVCSliceHdr.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\n*/\nAVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr);\n\n/**\nThis function performs POC related operation prior to decoding a picture\n\\param \"video\" \"Pointer to AVCCommonObj.\"\n\\return \"AVCDEC_SUCCESS for success and AVCDEC_FAIL otherwise.\"\nSee also PostPOC() for initialization of some variables.\n*/\nAVCDec_Status DecodePOC(AVCCommonObj *video);\n\n\n\n/*------------ residual.c ------------------*/\n/**\nThis function decodes the intra pcm data and fill it in the corresponding location\non the current picture.\n\\param \"video\"  \"Pointer to AVCCommonObj.\"\n\\param \"stream\" \"Pointer to AVCDecBitstream.\"\n*/\nAVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream);\n\n/**\nThis function performs residual syntax decoding as well as quantization and transformation of\nthe decoded coefficients. 
See subclause 7.3.5.3.\n\\param \"video\"  \"Pointer to AVCDecObject.\"\n\\param \"currMB\" \"Pointer to current macroblock.\"\n*/\nAVCDec_Status residual(AVCDecObject *video, AVCMacroblock *currMB);\n\n/**\nThis function performs CAVLC syntax decoding to get the run and level information of the coefficients.\n\\param \"video\"  \"Pointer to AVCDecObject.\"\n\\param \"type\"   \"One of AVCResidualType for a particular 4x4 block.\"\n\\param \"bx\"     \"Horizontal block index.\"\n\\param \"by\"     \"Vertical block index.\"\n\\param \"level\"  \"Pointer to array of level for output.\"\n\\param \"run\"    \"Pointer to array of run for output.\"\n\\param \"numcoeff\"   \"Pointer to the total number of nonzero coefficients.\"\n\\return \"AVCDEC_SUCCESS for success.\"\n*/\nAVCDec_Status residual_block_cavlc(AVCDecObject *video, int nC, int maxNumCoeff,\n                                   int *level, int *run, int *numcoeff);\n\n#endif /* _AVCDEC_LIB_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/header.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_lib.h\"\n#include \"avcdec_bitstream.h\"\n#include \"oscl_mem.h\"\n#include \"avcdec_api.h\"\n\n/** see subclause 7.4.2.1 */\nAVCDec_Status DecodeSPS(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    AVCDec_Status status = AVCDEC_SUCCESS;\n    AVCSeqParamSet *seqParam, tempSeqParam;\n    uint temp;\n    int i;\n    uint profile_idc, constrained_set0_flag, constrained_set1_flag, constrained_set2_flag,constrained_set3_flag;\n    uint level_idc, seq_parameter_set_id;\n    void *userData = decvid->avcHandle->userData;\n    AVCHandle *avcHandle = decvid->avcHandle;\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"DecodeSPS\", -1, -1);\n\n    BitstreamReadBits(stream, 8, &profile_idc);\n    BitstreamRead1Bit(stream, &constrained_set0_flag);\n//  if (profile_idc != 66 && constrained_set0_flag != 1)\n//  {\n//      return AVCDEC_FAIL;\n//  }\n    BitstreamRead1Bit(stream, &constrained_set1_flag);\n    BitstreamRead1Bit(stream, &constrained_set2_flag);\n    BitstreamRead1Bit(stream, &constrained_set3_flag);\n    BitstreamReadBits(stream, 4, &temp);\n    BitstreamReadBits(stream, 8, &level_idc);\n    if (level_idc > 51)\n    {\n        return AVCDEC_FAIL;\n    }\n    if 
(mapLev2Idx[level_idc] == 255)\n    {\n        return AVCDEC_FAIL;\n    }\n    ue_v(stream, &seq_parameter_set_id);\n\n    if (seq_parameter_set_id > 31)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    /* Allocate sequence param set for seqParams[seq_parameter_set_id]. */\n    if (decvid->seqParams[seq_parameter_set_id] == NULL)  /* allocate seqParams[id] */\n    {\n        decvid->seqParams[seq_parameter_set_id] =\n            (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR);\n\n        if (decvid->seqParams[seq_parameter_set_id] == NULL)\n        {\n            return AVCDEC_MEMORY_FAIL;\n        }\n    }\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"done alloc seqParams\", -1, -1);\n\n    seqParam = &tempSeqParam; // assign to temporary structure first\n    oscl_memset((void*) seqParam, 0, sizeof(AVCSeqParamSet)); // init to 0\n\n    seqParam->profile_idc = profile_idc;\n    seqParam->constrained_set0_flag = constrained_set0_flag;\n    seqParam->constrained_set1_flag = constrained_set1_flag;\n    seqParam->constrained_set2_flag = constrained_set2_flag;\n    seqParam->constrained_set3_flag = constrained_set3_flag;\n    seqParam->level_idc = level_idc;\n    seqParam->seq_parameter_set_id = seq_parameter_set_id;\n\n    /* continue decoding SPS */\n    ue_v(stream, &(seqParam->log2_max_frame_num_minus4));\n\n    if (seqParam->log2_max_frame_num_minus4 > 12)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    ue_v(stream, &(seqParam->pic_order_cnt_type));\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"check point 1\", seqParam->log2_max_frame_num_minus4, seqParam->pic_order_cnt_type);\n\n    if (seqParam->pic_order_cnt_type == 0)\n    {\n        ue_v(stream, &(seqParam->log2_max_pic_order_cnt_lsb_minus4));\n    }\n    else if (seqParam->pic_order_cnt_type == 1)\n    {               // MC_CHECK\n        BitstreamRead1Bit(stream, (uint*)&(seqParam->delta_pic_order_always_zero_flag));\n        se_v32bit(stream, 
&(seqParam->offset_for_non_ref_pic));\n        se_v32bit(stream, &(seqParam->offset_for_top_to_bottom_field));\n        ue_v(stream, &(seqParam->num_ref_frames_in_pic_order_cnt_cycle));\n\n        for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)\n        {\n            se_v32bit(stream, &(seqParam->offset_for_ref_frame[i]));\n        }\n    }\n\n    ue_v(stream, &(seqParam->num_ref_frames));\n\n    if (seqParam->num_ref_frames > 16)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"check point 2\", seqParam->num_ref_frames, -1);\n\n    BitstreamRead1Bit(stream, (uint*)&(seqParam->gaps_in_frame_num_value_allowed_flag));\n    ue_v(stream, &(seqParam->pic_width_in_mbs_minus1));\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"picwidth\", seqParam->pic_width_in_mbs_minus1, -1);\n\n    ue_v(stream, &(seqParam->pic_height_in_map_units_minus1));\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"picwidth\", seqParam->pic_height_in_map_units_minus1, -1);\n\n    BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_mbs_only_flag));\n\n    seqParam->mb_adaptive_frame_field_flag = 0; /* default value */\n    if (!seqParam->frame_mbs_only_flag)\n    {\n        BitstreamRead1Bit(stream, (uint*)&(seqParam->mb_adaptive_frame_field_flag));\n    }\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"check point 3\", seqParam->frame_mbs_only_flag, -1);\n\n    BitstreamRead1Bit(stream, (uint*)&(seqParam->direct_8x8_inference_flag));\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"check point 4\", seqParam->direct_8x8_inference_flag, -1);\n\n    BitstreamRead1Bit(stream, (uint*)&(seqParam->frame_cropping_flag));\n    seqParam->frame_crop_left_offset = 0;  /* default value */\n    seqParam->frame_crop_right_offset = 0;/* default value */\n    seqParam->frame_crop_top_offset = 0;/* default value */\n    seqParam->frame_crop_bottom_offset = 0;/* default value */\n    if (seqParam->frame_cropping_flag)\n    {\n        ue_v(stream, 
&(seqParam->frame_crop_left_offset));\n        ue_v(stream, &(seqParam->frame_crop_right_offset));\n        ue_v(stream, &(seqParam->frame_crop_top_offset));\n        ue_v(stream, &(seqParam->frame_crop_bottom_offset));\n    }\n\n    DEBUG_LOG(userData, AVC_LOGTYPE_INFO, \"check point 5\", seqParam->frame_cropping_flag, -1);\n\n    BitstreamRead1Bit(stream, (uint*)&(seqParam->vui_parameters_present_flag));\n    if (seqParam->vui_parameters_present_flag)\n    {\n        status = vui_parameters(decvid, stream, seqParam);\n        if (status != AVCDEC_SUCCESS)\n        {\n            return status;\n        }\n    }\n\n    /* now everything is good, copy it */\n    oscl_memcpy(decvid->seqParams[seq_parameter_set_id], seqParam, sizeof(AVCSeqParamSet));\n\n    decvid->lastSPS = decvid->seqParams[seq_parameter_set_id]; /* for PVAVCDecGetSeqInfo */\n\n    return status;\n}\n\n\nAVCDec_Status vui_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCSeqParamSet *currSPS)\n{\n    uint temp;\n    uint temp32;\n    uint aspect_ratio_idc, overscan_appopriate_flag, video_format, video_full_range_flag;\n    /* aspect_ratio_info_present_flag */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        BitstreamReadBits(stream, 8, &aspect_ratio_idc);\n        if (aspect_ratio_idc == 255)\n        {\n            /* sar_width */\n            BitstreamReadBits(stream, 16, &temp);\n            /* sar_height */\n            BitstreamReadBits(stream, 16, &temp);\n        }\n    }\n    /* overscan_info_present */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        BitstreamRead1Bit(stream, &overscan_appopriate_flag);\n    }\n    /* video_signal_type_present_flag */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        BitstreamReadBits(stream, 3, &video_format);\n        BitstreamRead1Bit(stream, &video_full_range_flag);\n        /* colour_description_present_flag */\n        BitstreamRead1Bit(stream, &temp);\n        if (temp)\n       
 {\n            /* colour_primaries */\n            BitstreamReadBits(stream, 8, &temp);\n            /* transfer_characteristics */\n            BitstreamReadBits(stream, 8, &temp);\n            /* matrix coefficients */\n            BitstreamReadBits(stream, 8, &temp);\n        }\n    }\n    /*  chroma_loc_info_present_flag */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        /*  chroma_sample_loc_type_top_field */\n        ue_v(stream, &temp);\n        /*  chroma_sample_loc_type_bottom_field */\n        ue_v(stream, &temp);\n    }\n\n    /*  timing_info_present_flag*/\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        /*  num_unit_in_tick*/\n        BitstreamReadBits(stream, 32, &temp32);\n        /*  time_scale */\n        BitstreamReadBits(stream, 32, &temp32);\n        /*  fixed_frame_rate_flag */\n        BitstreamRead1Bit(stream, &temp);\n    }\n\n    /*  nal_hrd_parameters_present_flag */\n    BitstreamRead1Bit(stream, &temp);\n    currSPS->vui_parameters.nal_hrd_parameters_present_flag = temp;\n    if (temp)\n    {\n        hrd_parameters(decvid, stream, &(currSPS->vui_parameters.nal_hrd_parameters));\n    }\n    /*  vcl_hrd_parameters_present_flag*/\n    BitstreamRead1Bit(stream, &temp);\n    currSPS->vui_parameters.vcl_hrd_parameters_present_flag = temp;\n    if (temp)\n    {\n        hrd_parameters(decvid, stream, &(currSPS->vui_parameters.vcl_hrd_parameters));\n    }\n    if (currSPS->vui_parameters.nal_hrd_parameters_present_flag || currSPS->vui_parameters.vcl_hrd_parameters_present_flag)\n    {\n        /*  low_delay_hrd_flag */\n        BitstreamRead1Bit(stream, &temp);\n    }\n    /*  pic_struct_present_flag */\n    BitstreamRead1Bit(stream, &temp);\n    currSPS->vui_parameters.pic_struct_present_flag = temp;\n    /*  bitstream_restriction_flag */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        /*  motion_vectors_over_pic_boundaries_flag */\n        BitstreamRead1Bit(stream, &temp);\n    
    /*  max_bytes_per_pic_denom */\n        ue_v(stream, &temp);\n        /*  max_bits_per_mb_denom */\n        ue_v(stream, &temp);\n        /*  log2_max_mv_length_horizontal */\n        ue_v(stream, &temp);\n        /*  log2_max_mv_length_vertical */\n        ue_v(stream, &temp);\n        /*  num_reorder_frames */\n        ue_v(stream, &temp);\n        /*  max_dec_frame_buffering */\n        ue_v(stream, &temp);\n    }\n    return AVCDEC_SUCCESS;\n}\n\nAVCDec_Status hrd_parameters(AVCDecObject *decvid, AVCDecBitstream *stream, AVCHRDParams *HRDParam)\n{\n    OSCL_UNUSED_ARG(decvid);\n    uint temp;\n    uint cpb_cnt_minus1;\n    uint i;\n    ue_v(stream, &cpb_cnt_minus1);\n    HRDParam->cpb_cnt_minus1 = cpb_cnt_minus1;\n    /*  bit_rate_scale */\n    BitstreamReadBits(stream, 4, &temp);\n    /*  cpb_size_scale */\n    BitstreamReadBits(stream, 4, &temp);\n    for (i = 0; i <= cpb_cnt_minus1; i++)\n    {\n        /*  bit_rate_value_minus1[i] */\n        ue_v(stream, &temp);\n        /*  cpb_size_value_minus1[i] */\n        ue_v(stream, &temp);\n        /*  cbr_flag[i] */\n        ue_v(stream, &temp);\n    }\n    /*  initial_cpb_removal_delay_length_minus1 */\n    BitstreamReadBits(stream, 5, &temp);\n    /*  cpb_removal_delay_length_minus1 */\n    BitstreamReadBits(stream, 5, &temp);\n    HRDParam->cpb_removal_delay_length_minus1 = temp;\n    /*  dpb_output_delay_length_minus1 */\n    BitstreamReadBits(stream, 5, &temp);\n    HRDParam->dpb_output_delay_length_minus1 = temp;\n    /*  time_offset_length  */\n    BitstreamReadBits(stream, 5, &temp);\n    HRDParam->time_offset_length = temp;\n\n    return AVCDEC_SUCCESS;\n}\n\n\n/** see subclause 7.4.2.2 */\nAVCDec_Status DecodePPS(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream)\n{\n    AVCPicParamSet *picParam;\n    AVCDec_Status status;\n    int i, iGroup, numBits;\n    int PicWidthInMbs, PicHeightInMapUnits, PicSizeInMapUnits;\n    uint pic_parameter_set_id, seq_parameter_set_id;\n    uint 
temp_uint;\n    void *userData = decvid->avcHandle->userData;\n    AVCHandle *avcHandle = decvid->avcHandle;\n    AVCPicParamSet tempPicParam;  /* add this to make sure that we don't overwrite good PPS with corrupted new PPS */\n\n    ue_v(stream, &pic_parameter_set_id);\n    if (pic_parameter_set_id > 255)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    ue_v(stream, &seq_parameter_set_id);\n\n    if (seq_parameter_set_id > 31)\n    {\n        return AVCDEC_FAIL;\n    }\n\n\n    picParam = &tempPicParam;  /* decode everything into this local structure first */\n    oscl_memset((void *) picParam, 0, sizeof(AVCPicParamSet)); // init the structure to 0\n\n    picParam->slice_group_id = NULL;\n\n    picParam->seq_parameter_set_id = seq_parameter_set_id;\n    picParam->pic_parameter_set_id = pic_parameter_set_id;\n\n    BitstreamRead1Bit(stream, (uint*)&(picParam->entropy_coding_mode_flag));\n    if (picParam->entropy_coding_mode_flag)\n    {\n        status = AVCDEC_NOT_SUPPORTED;\n        goto clean_up;\n    }\n    BitstreamRead1Bit(stream, (uint*)&(picParam->pic_order_present_flag));\n    ue_v(stream, &(picParam->num_slice_groups_minus1));\n\n    if (picParam->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1)\n    {\n        status = AVCDEC_FAIL;\n        goto clean_up;\n    }\n\n    picParam->slice_group_change_rate_minus1 = 0; /* default value */\n    if (picParam->num_slice_groups_minus1 > 0)\n    {\n        ue_v(stream, &(picParam->slice_group_map_type));\n        if (picParam->slice_group_map_type > 6)\n        {\n            status = AVCDEC_FAIL; /* out of range */\n            goto clean_up;\n        }\n\n        if (picParam->slice_group_map_type == 0)\n        {\n            for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)\n            {\n                ue_v(stream, &(picParam->run_length_minus1[iGroup]));\n            }\n        }\n        else if (picParam->slice_group_map_type == 2)\n        {   // MC_CHECK  <= or <\n       
     for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)\n            {\n                ue_v(stream, &(picParam->top_left[iGroup]));\n                ue_v(stream, &(picParam->bottom_right[iGroup]));\n            }\n        }\n        else if (picParam->slice_group_map_type == 3 ||\n                 picParam->slice_group_map_type == 4 ||\n                 picParam->slice_group_map_type == 5)\n        {\n            BitstreamRead1Bit(stream, (uint*)&(picParam->slice_group_change_direction_flag));\n            ue_v(stream, &(picParam->slice_group_change_rate_minus1));\n        }\n        else if (picParam->slice_group_map_type == 6)\n        {\n            ue_v(stream, &(picParam->pic_size_in_map_units_minus1));\n\n            numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */\n            i = picParam->num_slice_groups_minus1;\n            while (i > 0)\n            {\n                numBits++;\n                i >>= 1;\n            }\n\n            i = picParam->seq_parameter_set_id;\n            if (decvid->seqParams[i] == NULL)\n            {\n                status = AVCDEC_FAIL;\n                goto clean_up;\n            }\n\n\n            PicWidthInMbs = decvid->seqParams[i]->pic_width_in_mbs_minus1 + 1;\n            PicHeightInMapUnits = decvid->seqParams[i]->pic_height_in_map_units_minus1 + 1 ;\n            PicSizeInMapUnits = PicWidthInMbs * PicHeightInMapUnits ;\n\n            /* information has to be consistent with the seq_param */\n            if ((int)picParam->pic_size_in_map_units_minus1 != PicSizeInMapUnits - 1)\n            {\n                status = AVCDEC_FAIL;\n                goto clean_up;\n            }\n\n            if (picParam->slice_group_id)\n            {\n                avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id);\n            }\n            picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * PicSizeInMapUnits, DEFAULT_ATTR);\n            if 
(picParam->slice_group_id == NULL)\n            {\n                status =  AVCDEC_MEMORY_FAIL;\n                goto clean_up;\n            }\n\n            for (i = 0; i < PicSizeInMapUnits; i++)\n            {\n                BitstreamReadBits(stream, numBits, &(picParam->slice_group_id[i]));\n            }\n        }\n    }\n\n    ue_v(stream, &(picParam->num_ref_idx_l0_active_minus1));\n    if (picParam->num_ref_idx_l0_active_minus1 > 31)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    ue_v(stream, &(picParam->num_ref_idx_l1_active_minus1));\n    if (picParam->num_ref_idx_l1_active_minus1 > 31)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    BitstreamRead1Bit(stream, (uint*)&(picParam->weighted_pred_flag));\n    BitstreamReadBits(stream, 2, &(picParam->weighted_bipred_idc));\n    if (picParam->weighted_bipred_idc > 2)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    se_v(stream, &(picParam->pic_init_qp_minus26));\n    if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    se_v(stream, &(picParam->pic_init_qs_minus26));\n    if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    se_v(stream, &(picParam->chroma_qp_index_offset));\n    if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)\n    {\n        status = AVCDEC_FAIL; /* out of range */\n        goto clean_up;\n    }\n\n    BitstreamReadBits(stream, 3, &temp_uint);\n    picParam->deblocking_filter_control_present_flag = temp_uint >> 2;\n    picParam->constrained_intra_pred_flag = (temp_uint >> 1) & 1;\n    picParam->redundant_pic_cnt_present_flag = temp_uint & 1;\n\n    // add this final check\n    if 
(decvid->seqParams[picParam->seq_parameter_set_id] == NULL) // associated SPS is not found\n    {\n        status = AVCDEC_FAIL;\n        goto clean_up;\n    }\n\n    // now that everything is OK - we may want to allocate the structure\n    /* 2.1 if picParams[pic_param_set_id] is NULL, allocate it. */\n    if (decvid->picParams[pic_parameter_set_id] == NULL)\n    {\n        decvid->picParams[pic_parameter_set_id] =\n            (AVCPicParamSet*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);\n        if (decvid->picParams[pic_parameter_set_id] == NULL)\n        {\n            return AVCDEC_MEMORY_FAIL;\n        }\n\n        oscl_memset(decvid->picParams[pic_parameter_set_id], 0, sizeof(AVCPicParamSet));\n    }\n\n    /* Everything is successful, now copy it to the global structure */\n    oscl_memcpy(decvid->picParams[pic_parameter_set_id], picParam, sizeof(AVCPicParamSet));\n\n    video->currPicParams = decvid->picParams[pic_parameter_set_id];\n\n\n    return AVCDEC_SUCCESS;\nclean_up:\n\n    if (picParam->slice_group_id != NULL)\n    {\n        avcHandle->CBAVC_Free(userData, (int)picParam->slice_group_id);\n    }\n\n    return status;\n}\n\n\n/* FirstPartOfSliceHeader();\n    RestOfSliceHeader() */\n/** see subclause 7.4.3 */\nAVCDec_Status DecodeSliceHeader(AVCDecObject *decvid, AVCCommonObj *video, AVCDecBitstream *stream)\n{\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCPicParamSet *currPPS;\n    AVCSeqParamSet *currSPS;\n    AVCDec_Status status;\n    uint idr_pic_id;\n    int slice_type, temp, i;\n\n    ue_v(stream, &(sliceHdr->first_mb_in_slice));\n    ue_v(stream, (uint*)&slice_type);\n\n    if (sliceHdr->first_mb_in_slice != 0)\n    {\n        if ((int)sliceHdr->slice_type >= 5 && (slice_type != (int)sliceHdr->slice_type) && (slice_type != (int)sliceHdr->slice_type - 5))\n        {\n            return AVCDEC_FAIL; /* slice type doesn't follow the first slice in the picture */\n        }\n    }\n    
sliceHdr->slice_type = (AVCSliceType) slice_type;\n    if (slice_type > 4)\n    {\n        slice_type -= 5;\n    }\n\n    if (slice_type == 1 || slice_type > 2)\n    {\n        return AVCDEC_NOT_SUPPORTED;\n    }\n\n    video->slice_type = (AVCSliceType) slice_type;\n\n    ue_v(stream, &(sliceHdr->pic_parameter_set_id));\n    /* end FirstPartSliceHeader() */\n    /* begin RestOfSliceHeader() */\n    /* after getting pic_parameter_set_id, we have to load corresponding SPS and PPS */\n    if (sliceHdr->pic_parameter_set_id > 255)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    if (decvid->picParams[sliceHdr->pic_parameter_set_id] == NULL)\n        return AVCDEC_FAIL; /* PPS doesn't exist */\n\n    currPPS = video->currPicParams = decvid->picParams[sliceHdr->pic_parameter_set_id];\n\n    if (decvid->seqParams[currPPS->seq_parameter_set_id] == NULL)\n        return AVCDEC_FAIL; /* SPS doesn't exist */\n\n    currSPS = video->currSeqParams = decvid->seqParams[currPPS->seq_parameter_set_id];\n\n    if (currPPS->seq_parameter_set_id != video->seq_parameter_set_id)\n    {\n        video->seq_parameter_set_id = currPPS->seq_parameter_set_id;\n        status = (AVCDec_Status)AVCConfigureSequence(decvid->avcHandle, video, false);\n        if (status != AVCDEC_SUCCESS)\n            return status;\n\n        video->level_idc = currSPS->level_idc;\n    }\n\n    /* derived variables from SPS */\n    video->MaxFrameNum = 1 << (currSPS->log2_max_frame_num_minus4 + 4);\n    // MC_OPTIMIZE\n    video->PicWidthInMbs = currSPS->pic_width_in_mbs_minus1 + 1;\n    video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;\n    video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;\n    video->PicHeightInMapUnits = currSPS->pic_height_in_map_units_minus1 + 1 ;\n    video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;\n    video->FrameHeightInMbs = (2 - currSPS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;\n\n    /* derived from PPS */\n    
video->SliceGroupChangeRate = currPPS->slice_group_change_rate_minus1 + 1;\n\n    /* then we can continue decoding slice header */\n\n    BitstreamReadBits(stream, currSPS->log2_max_frame_num_minus4 + 4, &(sliceHdr->frame_num));\n\n    if (video->currFS == NULL && sliceHdr->frame_num != 0)\n    {\n        video->prevFrameNum = video->PrevRefFrameNum = sliceHdr->frame_num - 1;\n    }\n\n    if (!currSPS->frame_mbs_only_flag)\n    {\n        BitstreamRead1Bit(stream, &(sliceHdr->field_pic_flag));\n        if (sliceHdr->field_pic_flag)\n        {\n            return AVCDEC_NOT_SUPPORTED;\n        }\n    }\n\n    /* derived variables from slice header*/\n    video->PicHeightInMbs = video->FrameHeightInMbs;\n    video->PicHeightInSamplesL = video->PicHeightInMbs * 16;\n    video->PicHeightInSamplesC = video->PicHeightInMbs * 8;\n    video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;\n\n    if (sliceHdr->first_mb_in_slice >= video->PicSizeInMbs)\n    {\n        return AVCDEC_FAIL;\n    }\n    video->MaxPicNum = video->MaxFrameNum;\n    video->CurrPicNum = sliceHdr->frame_num;\n\n\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        if (sliceHdr->frame_num != 0)\n        {\n            return AVCDEC_FAIL;\n        }\n        ue_v(stream, &idr_pic_id);\n    }\n\n    sliceHdr->delta_pic_order_cnt_bottom = 0; /* default value */\n    sliceHdr->delta_pic_order_cnt[0] = 0; /* default value */\n    sliceHdr->delta_pic_order_cnt[1] = 0; /* default value */\n    if (currSPS->pic_order_cnt_type == 0)\n    {\n        BitstreamReadBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,\n                          &(sliceHdr->pic_order_cnt_lsb));\n        video->MaxPicOrderCntLsb =  1 << (currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4);\n        if (sliceHdr->pic_order_cnt_lsb > video->MaxPicOrderCntLsb - 1)\n            return AVCDEC_FAIL; /* out of range */\n\n        if (currPPS->pic_order_present_flag)\n        {\n            se_v32bit(stream, 
&(sliceHdr->delta_pic_order_cnt_bottom));\n        }\n    }\n    if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)\n    {\n        se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[0]));\n        if (currPPS->pic_order_present_flag)\n        {\n            se_v32bit(stream, &(sliceHdr->delta_pic_order_cnt[1]));\n        }\n    }\n\n    sliceHdr->redundant_pic_cnt = 0; /* default value */\n    if (currPPS->redundant_pic_cnt_present_flag)\n    {\n        // MC_CHECK\n        ue_v(stream, &(sliceHdr->redundant_pic_cnt));\n        if (sliceHdr->redundant_pic_cnt > 127) /* out of range */\n            return AVCDEC_FAIL;\n\n        if (sliceHdr->redundant_pic_cnt > 0) /* redundant picture */\n            return AVCDEC_NOT_SUPPORTED; /* not supported */\n    }\n    sliceHdr->num_ref_idx_l0_active_minus1 = currPPS->num_ref_idx_l0_active_minus1;\n    sliceHdr->num_ref_idx_l1_active_minus1 = currPPS->num_ref_idx_l1_active_minus1;\n\n    if (slice_type == AVC_P_SLICE)\n    {\n        BitstreamRead1Bit(stream, &(sliceHdr->num_ref_idx_active_override_flag));\n        if (sliceHdr->num_ref_idx_active_override_flag)\n        {\n            ue_v(stream, &(sliceHdr->num_ref_idx_l0_active_minus1));\n        }\n    }\n\n    // check bound\n    if (sliceHdr->num_ref_idx_l0_active_minus1 > 15) // ||sliceHdr->num_ref_idx_l1_active_minus1 > 31)\n    {\n        return AVCDEC_FAIL; /* not allowed */\n    }\n\n    /* if MbaffFrameFlag =1,\n    max value of index is num_ref_idx_l0_active_minus1 for frame MBs and\n    2*sliceHdr->num_ref_idx_l0_active_minus1 + 1 for field MBs */\n\n    /* ref_pic_list_reordering() */\n    status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);\n    if (status != AVCDEC_SUCCESS)\n    {\n        return status;\n    }\n\n\n    if (video->nal_ref_idc != 0)\n    {\n        dec_ref_pic_marking(video, stream, sliceHdr);\n    }\n    se_v(stream, &(sliceHdr->slice_qp_delta));\n\n    video->QPy = 26 + 
currPPS->pic_init_qp_minus26 + sliceHdr->slice_qp_delta;\n    if (video->QPy > 51 || video->QPy < 0)\n    {\n        video->QPy = AVC_CLIP3(0, 51, video->QPy);\n//                  return AVCDEC_FAIL;\n    }\n    video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];\n\n    video->QPy_div_6 = (video->QPy * 43) >> 8;\n    video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;\n\n    video->QPc_div_6 = (video->QPc * 43) >> 8;\n    video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;\n\n    sliceHdr->slice_alpha_c0_offset_div2 = 0;\n    sliceHdr->slice_beta_offset_div_2 = 0;\n    sliceHdr->disable_deblocking_filter_idc = 0;\n    video->FilterOffsetA = video->FilterOffsetB = 0;\n\n    if (currPPS->deblocking_filter_control_present_flag)\n    {\n        ue_v(stream, &(sliceHdr->disable_deblocking_filter_idc));\n        if (sliceHdr->disable_deblocking_filter_idc > 2)\n        {\n            return AVCDEC_FAIL; /* out of range */\n        }\n        if (sliceHdr->disable_deblocking_filter_idc != 1)\n        {\n            se_v(stream, &(sliceHdr->slice_alpha_c0_offset_div2));\n            if (sliceHdr->slice_alpha_c0_offset_div2 < -6 ||\n                    sliceHdr->slice_alpha_c0_offset_div2 > 6)\n            {\n                return AVCDEC_FAIL;\n            }\n            video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;\n\n            se_v(stream, &(sliceHdr->slice_beta_offset_div_2));\n            if (sliceHdr->slice_beta_offset_div_2 < -6 ||\n                    sliceHdr->slice_beta_offset_div_2 > 6)\n            {\n                return AVCDEC_FAIL;\n            }\n            video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;\n        }\n    }\n\n    if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3\n            && currPPS->slice_group_map_type <= 5)\n    {\n        /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */\n        temp = 
video->PicSizeInMapUnits / video->SliceGroupChangeRate;\n        if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)\n        {\n            temp++;\n        }\n        i = 0;\n        temp++;\n        while (temp)\n        {\n            temp >>= 1;\n            i++;\n        }\n\n        BitstreamReadBits(stream, i, &(sliceHdr->slice_group_change_cycle));\n        video->MapUnitsInSliceGroup0 =\n            AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n\nAVCDec_Status fill_frame_num_gap(AVCHandle *avcHandle, AVCCommonObj *video)\n{\n    AVCDec_Status status;\n    int CurrFrameNum;\n    int UnusedShortTermFrameNum;\n    int tmp1 = video->sliceHdr->delta_pic_order_cnt[0];\n    int tmp2 = video->sliceHdr->delta_pic_order_cnt[1];\n    int tmp3 = video->CurrPicNum;\n    int tmp4 = video->sliceHdr->adaptive_ref_pic_marking_mode_flag;\n    UnusedShortTermFrameNum = (video->prevFrameNum + 1) % video->MaxFrameNum;\n    CurrFrameNum = video->sliceHdr->frame_num;\n\n    video->sliceHdr->delta_pic_order_cnt[0] = 0;\n    video->sliceHdr->delta_pic_order_cnt[1] = 0;\n    while (CurrFrameNum != UnusedShortTermFrameNum)\n    {\n        video->CurrPicNum = UnusedShortTermFrameNum;\n        video->sliceHdr->frame_num = UnusedShortTermFrameNum;\n\n        status = (AVCDec_Status)DPBInitBuffer(avcHandle, video);\n        if (status != AVCDEC_SUCCESS)  /* no buffer available */\n        {\n            return status;\n        }\n        DecodePOC(video);\n        DPBInitPic(video, UnusedShortTermFrameNum);\n\n\n        video->currFS->PicOrderCnt = video->PicOrderCnt;\n        video->currFS->FrameNum = video->sliceHdr->frame_num;\n\n        /* initialize everything to zero */\n        video->currFS->IsOutputted = 0x01;\n        video->currFS->IsReference = 3;\n        video->currFS->IsLongTerm = 0;\n        video->currFS->frame.isReference = TRUE;\n        
video->currFS->frame.isLongTerm = FALSE;\n\n        video->sliceHdr->adaptive_ref_pic_marking_mode_flag = 0;\n\n        status = (AVCDec_Status)StorePictureInDPB(avcHandle, video);  // MC_CHECK check the return status\n        if (status != AVCDEC_SUCCESS)\n        {\n            return AVCDEC_FAIL;\n        }\n        video->prevFrameNum = UnusedShortTermFrameNum;\n        UnusedShortTermFrameNum = (UnusedShortTermFrameNum + 1) % video->MaxFrameNum;\n    }\n    video->sliceHdr->frame_num = CurrFrameNum;\n    video->CurrPicNum = tmp3;\n    video->sliceHdr->delta_pic_order_cnt[0] = tmp1;\n    video->sliceHdr->delta_pic_order_cnt[1] = tmp2;\n    video->sliceHdr->adaptive_ref_pic_marking_mode_flag = tmp4;\n    return AVCDEC_SUCCESS;\n}\n\n/** see subclause 7.4.3.1 */\nAVCDec_Status ref_pic_list_reordering(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)\n{\n    int i;\n\n    if (slice_type != AVC_I_SLICE)\n    {\n        BitstreamRead1Bit(stream, &(sliceHdr->ref_pic_list_reordering_flag_l0));\n        if (sliceHdr->ref_pic_list_reordering_flag_l0)\n        {\n            i = 0;\n            do\n            {\n                ue_v(stream, &(sliceHdr->reordering_of_pic_nums_idc_l0[i]));\n                if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||\n                        sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)\n                {\n                    ue_v(stream, &(sliceHdr->abs_diff_pic_num_minus1_l0[i]));\n                    if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&\n                            sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 1)\n                    {\n                        return AVCDEC_FAIL; /* out of range */\n                    }\n                    if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&\n                            sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum / 2 - 2)\n                    {\n                        return 
AVCDEC_FAIL; /* out of range */\n                    }\n                }\n                else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)\n                {\n                    ue_v(stream, &(sliceHdr->long_term_pic_num_l0[i]));\n                }\n                i++;\n            }\n            while (sliceHdr->reordering_of_pic_nums_idc_l0[i-1] != 3\n                    && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;\n\n            if (sliceHdr->reordering_of_pic_nums_idc_l0[i-1] != 3) // only way to exit the while loop\n            {\n                return AVCDEC_FAIL;\n            }\n        }\n    }\n    return AVCDEC_SUCCESS;\n}\n\n/** see subclause 7.4.3.3 */\nAVCDec_Status dec_ref_pic_marking(AVCCommonObj *video, AVCDecBitstream *stream, AVCSliceHeader *sliceHdr)\n{\n    int i;\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        BitstreamRead1Bit(stream, &(sliceHdr->no_output_of_prior_pics_flag));\n        BitstreamRead1Bit(stream, &(sliceHdr->long_term_reference_flag));\n        if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */\n        {\n            video->MaxLongTermFrameIdx = -1; /* no long-term frame indx */\n        }\n        else /* used for long-term */\n        {\n            video->MaxLongTermFrameIdx = 0;\n            video->LongTermFrameIdx = 0;\n        }\n    }\n    else\n    {\n        BitstreamRead1Bit(stream, &(sliceHdr->adaptive_ref_pic_marking_mode_flag));\n        if (sliceHdr->adaptive_ref_pic_marking_mode_flag)\n        {\n            i = 0;\n            do\n            {\n                ue_v(stream, &(sliceHdr->memory_management_control_operation[i]));\n                if (sliceHdr->memory_management_control_operation[i] == 1 ||\n                        sliceHdr->memory_management_control_operation[i] == 3)\n                {\n                    ue_v(stream, &(sliceHdr->difference_of_pic_nums_minus1[i]));\n                }\n                if 
(sliceHdr->memory_management_control_operation[i] == 2)\n                {\n                    ue_v(stream, &(sliceHdr->long_term_pic_num[i]));\n                }\n                if (sliceHdr->memory_management_control_operation[i] == 3 ||\n                        sliceHdr->memory_management_control_operation[i] == 6)\n                {\n                    ue_v(stream, &(sliceHdr->long_term_frame_idx[i]));\n                }\n                if (sliceHdr->memory_management_control_operation[i] == 4)\n                {\n                    ue_v(stream, &(sliceHdr->max_long_term_frame_idx_plus1[i]));\n                }\n                i++;\n            }\n            while (sliceHdr->memory_management_control_operation[i-1] != 0 && i < MAX_DEC_REF_PIC_MARKING);\n            if (i >= MAX_DEC_REF_PIC_MARKING)\n            {\n                return AVCDEC_FAIL; /* we're screwed!!, not enough memory */\n            }\n        }\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see subclause 8.2.1 Decoding process for picture order count. 
*/\nAVCDec_Status DecodePOC(AVCCommonObj *video)\n{\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    int i;\n\n    switch (currSPS->pic_order_cnt_type)\n    {\n        case 0: /* POC MODE 0 , subclause 8.2.1.1 */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->prevPicOrderCntMsb = 0;\n                video->prevPicOrderCntLsb = 0;\n            }\n\n            /* Calculate the MSBs of current picture */\n            if (sliceHdr->pic_order_cnt_lsb  <  video->prevPicOrderCntLsb  &&\n                    (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb)  >= (video->MaxPicOrderCntLsb / 2))\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb;\n            else if (sliceHdr->pic_order_cnt_lsb  >  video->prevPicOrderCntLsb  &&\n                     (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb)  > (video->MaxPicOrderCntLsb / 2))\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb;\n            else\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb;\n\n            /* JVT-I010 page 81 is different from JM7.3 */\n\n\n            video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;\n            video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;\n\n            break;\n\n\n        case 1: /* POC MODE 1, subclause 8.2.1.2 */\n            /* calculate FrameNumOffset */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->prevFrameNumOffset = 0;\n                video->FrameNumOffset = 0;\n            }\n            else if (video->prevFrameNum > sliceHdr->frame_num)\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;\n            }\n            else\n            {\n           
     video->FrameNumOffset = video->prevFrameNumOffset;\n            }\n            /* calculate absFrameNum */\n            if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)\n            {\n                video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;\n            }\n            else\n            {\n                video->absFrameNum = 0;\n            }\n\n            if (video->absFrameNum > 0 && video->nal_ref_idc == 0)\n            {\n                video->absFrameNum--;\n            }\n\n            /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */\n            if (video->absFrameNum > 0)\n            {\n                video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;\n                video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;\n            }\n            /* derive expectedDeltaPerPicOrderCntCycle */\n            video->expectedDeltaPerPicOrderCntCycle = 0;\n            for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)\n            {\n                video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];\n            }\n            /* derive expectedPicOrderCnt */\n            if (video->absFrameNum)\n            {\n                video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;\n                for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)\n                {\n                    video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];\n                }\n            }\n            else\n            {\n                video->expectedPicOrderCnt = 0;\n            }\n\n            if (video->nal_ref_idc == 0)\n            {\n                video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;\n            }\n            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */\n\n        
    video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];\n            video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];\n\n            video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);\n\n\n            break;\n\n\n        case 2: /* POC MODE 2, subclause 8.2.1.3 */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->FrameNumOffset = 0;\n            }\n            else if (video->prevFrameNum > sliceHdr->frame_num)\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;\n            }\n            else\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset;\n            }\n            /* derive tempPicOrderCnt, we just use PicOrderCnt */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->PicOrderCnt = 0;\n            }\n            else if (video->nal_ref_idc == 0)\n            {\n                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;\n            }\n            else\n            {\n                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);\n            }\n            video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;\n            break;\n        default:\n            return AVCDEC_FAIL;\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n\nAVCDec_Status DecodeSEI(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    OSCL_UNUSED_ARG(decvid);\n    OSCL_UNUSED_ARG(stream);\n    return AVCDEC_SUCCESS;\n}\n\nAVCDec_Status sei_payload(AVCDecObject *decvid, AVCDecBitstream *stream, uint payloadType, uint payloadSize)\n{\n    AVCDec_Status status = AVCDEC_SUCCESS;\n    uint i;\n    switch (payloadType)\n    {\n        case 0:\n            /*  buffering period SEI */\n            
status = buffering_period(decvid, stream);\n            break;\n        case 1:\n            /*  picture timing SEI */\n            status = pic_timing(decvid, stream);\n            break;\n        case 2:\n\n        case 3:\n\n        case 4:\n\n        case 5:\n\n        case 8:\n\n        case 9:\n\n        case 10:\n\n        case 11:\n\n        case 12:\n\n        case 13:\n\n        case 14:\n\n        case 15:\n\n        case 16:\n\n        case 17:\n            for (i = 0; i < payloadSize; i++)\n            {\n                BitstreamFlushBits(stream, 8);\n            }\n            break;\n        case 6:\n            /*      recovery point SEI              */\n            status = recovery_point(decvid, stream);\n            break;\n        case 7:\n            /*      decoded reference picture marking repetition SEI */\n            status = dec_ref_pic_marking_repetition(decvid, stream);\n            break;\n\n        case 18:\n            /*      motion-constrained slice group set SEI */\n            status = motion_constrained_slice_group_set(decvid, stream);\n            break;\n        default:\n            /*          reserved_sei_message */\n            for (i = 0; i < payloadSize; i++)\n            {\n                BitstreamFlushBits(stream, 8);\n            }\n            break;\n    }\n    BitstreamByteAlign(stream);\n    return status;\n}\n\nAVCDec_Status buffering_period(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    AVCSeqParamSet *currSPS;\n    uint seq_parameter_set_id;\n    uint temp;\n    uint i;\n    ue_v(stream, &seq_parameter_set_id);\n    if (seq_parameter_set_id > 31)\n    {\n        return AVCDEC_FAIL;\n    }\n\n//  decvid->common->seq_parameter_set_id = seq_parameter_set_id;\n\n    currSPS = decvid->seqParams[seq_parameter_set_id];\n    if (currSPS->vui_parameters.nal_hrd_parameters_present_flag)\n    {\n        for (i = 0; i <= currSPS->vui_parameters.nal_hrd_parameters.cpb_cnt_minus1; i++)\n        {\n            /* 
initial_cpb_removal_delay[i] */\n            BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n            /*initial _cpb_removal_delay_offset[i] */\n            BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n        }\n    }\n\n    if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag)\n    {\n        for (i = 0; i <= currSPS->vui_parameters.vcl_hrd_parameters.cpb_cnt_minus1; i++)\n        {\n            /* initial_cpb_removal_delay[i] */\n            BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n            /*initial _cpb_removal_delay_offset[i] */\n            BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n        }\n    }\n\n    return AVCDEC_SUCCESS;\n}\nAVCDec_Status pic_timing(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    AVCSeqParamSet *currSPS;\n    uint temp, NumClockTs = 0, time_offset_length = 24, full_timestamp_flag;\n    uint i;\n\n    currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id];\n\n    if (currSPS->vui_parameters.nal_hrd_parameters_present_flag)\n    {\n        BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n        BitstreamReadBits(stream, currSPS->vui_parameters.nal_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp);\n        time_offset_length = currSPS->vui_parameters.nal_hrd_parameters.time_offset_length;\n    }\n    else if (currSPS->vui_parameters.vcl_hrd_parameters_present_flag)\n    {\n        BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.cpb_removal_delay_length_minus1 + 1, &temp);\n        BitstreamReadBits(stream, currSPS->vui_parameters.vcl_hrd_parameters.dpb_output_delay_length_minus1 + 1, &temp);\n        time_offset_length = 
currSPS->vui_parameters.vcl_hrd_parameters.time_offset_length;\n    }\n\n    if (currSPS->vui_parameters.pic_struct_present_flag)\n    {\n        /* pic_struct */\n        BitstreamReadBits(stream, 4, &temp);\n\n        switch (temp)\n        {\n            case 0:\n            case 1:\n            case 2:\n                NumClockTs = 1;\n                break;\n            case 3:\n            case 4:\n            case 7:\n                NumClockTs = 2;\n                break;\n            case 5:\n            case 6:\n            case 8:\n                NumClockTs = 3;\n                break;\n            default:\n                NumClockTs = 0;\n                break;\n        }\n\n        for (i = 0; i < NumClockTs; i++)\n        {\n            /* clock_timestamp_flag[i] */\n            BitstreamRead1Bit(stream, &temp);\n            if (temp)\n            {\n                /* ct_type */\n                BitstreamReadBits(stream, 2, &temp);\n                /* nuit_field_based_flag */\n                BitstreamRead1Bit(stream, &temp);\n                /* counting_type        */\n                BitstreamReadBits(stream, 5, &temp);\n                /* full_timestamp_flag */\n                BitstreamRead1Bit(stream, &temp);\n                full_timestamp_flag = temp;\n                /* discontinuity_flag */\n                BitstreamRead1Bit(stream, &temp);\n                /* cnt_dropped_flag */\n                BitstreamRead1Bit(stream, &temp);\n                /* n_frames           */\n                BitstreamReadBits(stream, 8, &temp);\n\n\n                if (full_timestamp_flag)\n                {\n                    /* seconds_value */\n                    BitstreamReadBits(stream, 6, &temp);\n                    /* minutes_value */\n                    BitstreamReadBits(stream, 6, &temp);\n                    /* hours_value */\n                    BitstreamReadBits(stream, 5, &temp);\n                }\n                else\n                {\n   
                 /* seconds_flag  */\n                    BitstreamRead1Bit(stream, &temp);\n                    if (temp)\n                    {\n                        /* seconds_value */\n                        BitstreamReadBits(stream, 6, &temp);\n                        /* minutes_flag  */\n                        BitstreamRead1Bit(stream, &temp);\n                        if (temp)\n                        {\n                            /* minutes_value */\n                            BitstreamReadBits(stream, 6, &temp);\n\n                            /* hourss_flag  */\n                            BitstreamRead1Bit(stream, &temp);\n\n                            if (temp)\n                            {\n                                /* hours_value */\n                                BitstreamReadBits(stream, 5, &temp);\n                            }\n\n                        }\n                    }\n                }\n\n                if (time_offset_length)\n                {\n                    /* time_offset */\n                    BitstreamReadBits(stream, time_offset_length, &temp);\n                }\n                else\n                {\n                    /* time_offset */\n                    temp = 0;\n                }\n            }\n        }\n    }\n    return AVCDEC_SUCCESS;\n}\nAVCDec_Status recovery_point(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    OSCL_UNUSED_ARG(decvid);\n    uint temp;\n    /* recover_frame_cnt */\n    ue_v(stream, &temp);\n    /* exact_match_flag */\n    BitstreamRead1Bit(stream, &temp);\n    /* broken_link_flag */\n    BitstreamRead1Bit(stream, &temp);\n    /* changing slic_group_idc */\n    BitstreamReadBits(stream, 2, &temp);\n    return AVCDEC_SUCCESS;\n}\nAVCDec_Status dec_ref_pic_marking_repetition(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    AVCSeqParamSet *currSPS;\n    uint temp;\n    currSPS = decvid->seqParams[decvid->common->seq_parameter_set_id];\n    /* original_idr_flag */\n  
  BitstreamRead1Bit(stream, &temp);\n    /* original_frame_num */\n    ue_v(stream, &temp);\n    if (currSPS->frame_mbs_only_flag == 0)\n    {\n        /* original_field_pic_flag */\n        BitstreamRead1Bit(stream, &temp);\n        if (temp)\n        {\n            /* original_bottom_field_flag */\n            BitstreamRead1Bit(stream, &temp);\n        }\n    }\n\n    /*  dec_ref_pic_marking(video,stream,sliceHdr); */\n\n\n    return AVCDEC_SUCCESS;\n}\nAVCDec_Status motion_constrained_slice_group_set(AVCDecObject *decvid, AVCDecBitstream *stream)\n{\n    OSCL_UNUSED_ARG(decvid);\n    uint temp, i, numBits;\n    /* num_slice_groups_in_set_minus1 */\n    ue_v(stream, &temp);\n\n    numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */\n    i = temp;\n    while (i > 0)\n    {\n        numBits++;\n        i >>= 1;\n    }\n    for (i = 0; i <= temp; i++)\n    {\n        /* slice_group_id */\n        BitstreamReadBits(stream, numBits, &temp);\n    }\n    /* exact_sample_value_match_flag */\n    BitstreamRead1Bit(stream, &temp);\n    /* pan_scan_rect_flag */\n    BitstreamRead1Bit(stream, &temp);\n    if (temp)\n    {\n        /* pan_scan_rect_id */\n        ue_v(stream, &temp);\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/itrans.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avclib_common.h\"\n#include \"oscl_mem.h\"\n\n/* input are in the first 16 elements of block,\n   output must be in the location specified in Figure 8-6. */\n/* subclause 8.5.6 */\nvoid Intra16DCTrans(int16 *block, int Qq, int Rq)\n{\n    int m0, m1, m2, m3;\n    int j, offset;\n    int16 *inout;\n    int scale = dequant_coefres[Rq][0];\n\n    inout = block;\n    for (j = 0; j < 4; j++)\n    {\n        m0 = inout[0] + inout[4];\n        m1 = inout[0] - inout[4];\n        m2 = inout[8] + inout[12];\n        m3 = inout[8] - inout[12];\n\n\n        inout[0] = m0 + m2;\n        inout[4] = m0 - m2;\n        inout[8] = m1 - m3;\n        inout[12] = m1 + m3;\n        inout += 64;\n    }\n\n    inout = block;\n\n    if (Qq >= 2)  /* this way should be faster than JM */\n    {           /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. 
*/\n        Qq -= 2;\n        for (j = 0; j < 4; j++)\n        {\n            m0 = inout[0] + inout[64];\n            m1 = inout[0] - inout[64];\n            m2 = inout[128] + inout[192];\n            m3 = inout[128] - inout[192];\n\n            inout[0] = ((m0 + m2) * scale) << Qq;\n            inout[64] = ((m0 - m2) * scale) << Qq;\n            inout[128] = ((m1 - m3) * scale) << Qq;\n            inout[192] = ((m1 + m3) * scale) << Qq;\n            inout += 4;\n        }\n    }\n    else\n    {\n        Qq = 2 - Qq;\n        offset = 1 << (Qq - 1);\n\n        for (j = 0; j < 4; j++)\n        {\n            m0 = inout[0] + inout[64];\n            m1 = inout[0] - inout[64];\n            m2 = inout[128] + inout[192];\n            m3 = inout[128] - inout[192];\n\n            inout[0] = (((m0 + m2) * scale + offset) >> Qq);\n            inout[64] = (((m0 - m2) * scale + offset) >> Qq);\n            inout[128] = (((m1 - m3) * scale + offset) >> Qq);\n            inout[192] = (((m1 + m3) * scale + offset) >> Qq);\n            inout += 4;\n        }\n    }\n\n    return ;\n}\n\n/* see subclase 8.5.8 */\nvoid itrans(int16 *block, uint8 *pred, uint8 *cur, int width)\n{\n    int e0, e1, e2, e3; /* note, at every step of the calculation, these values */\n    /* shall never exceed 16bit sign value, but we don't check */\n    int i;           /* to save the cycles. 
*/\n    int16 *inout;\n\n    inout = block;\n\n    for (i = 4; i > 0; i--)\n    {\n        e0 = inout[0] + inout[2];\n        e1 = inout[0] - inout[2];\n        e2 = (inout[1] >> 1) - inout[3];\n        e3 = inout[1] + (inout[3] >> 1);\n\n        inout[0] = e0 + e3;\n        inout[1] = e1 + e2;\n        inout[2] = e1 - e2;\n        inout[3] = e0 - e3;\n\n        inout += 16;\n    }\n\n    for (i = 4; i > 0; i--)\n    {\n        e0 = block[0] + block[32];\n        e1 = block[0] - block[32];\n        e2 = (block[16] >> 1) - block[48];\n        e3 = block[16] + (block[48] >> 1);\n\n        e0 += e3;\n        e3 = (e0 - (e3 << 1)); /* e0-e3 */\n        e1 += e2;\n        e2 = (e1 - (e2 << 1)); /* e1-e2 */\n        e0 += 32;\n        e1 += 32;\n        e2 += 32;\n        e3 += 32;\n#ifdef USE_PRED_BLOCK\n        e0 = pred[0] + (e0 >> 6);\n        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */\n        e1 = pred[20] + (e1 >> 6);\n        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */\n        e2 = pred[40] + (e2 >> 6);\n        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */\n        e3 = pred[60] + (e3 >> 6);\n        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */\n        *cur = e0;\n        *(cur += width) = e1;\n        *(cur += width) = e2;\n        cur[width] = e3;\n        cur -= (width << 1);\n        cur++;\n        pred++;\n#else\n        OSCL_UNUSED_ARG(pred);\n\n        e0 = *cur + (e0 >> 6);\n        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */\n        *cur = e0;\n        e1 = *(cur += width) + (e1 >> 6);\n        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */\n        *cur = e1;\n        e2 = *(cur += width) + (e2 >> 6);\n        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */\n        *cur = e2;\n        e3 = cur[width] + (e3 >> 6);\n        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */\n        cur[width] = e3;\n        cur -= (width 
<< 1);\n        cur++;\n#endif\n        block++;\n    }\n\n    return ;\n}\n\n/* see subclase 8.5.8 */\nvoid ictrans(int16 *block, uint8 *pred, uint8 *cur, int width)\n{\n    int e0, e1, e2, e3; /* note, at every step of the calculation, these values */\n    /* shall never exceed 16bit sign value, but we don't check */\n    int i;           /* to save the cycles. */\n    int16 *inout;\n\n    inout = block;\n\n    for (i = 4; i > 0; i--)\n    {\n        e0 = inout[0] + inout[2];\n        e1 = inout[0] - inout[2];\n        e2 = (inout[1] >> 1) - inout[3];\n        e3 = inout[1] + (inout[3] >> 1);\n\n        inout[0] = e0 + e3;\n        inout[1] = e1 + e2;\n        inout[2] = e1 - e2;\n        inout[3] = e0 - e3;\n\n        inout += 16;\n    }\n\n    for (i = 4; i > 0; i--)\n    {\n        e0 = block[0] + block[32];\n        e1 = block[0] - block[32];\n        e2 = (block[16] >> 1) - block[48];\n        e3 = block[16] + (block[48] >> 1);\n\n        e0 += e3;\n        e3 = (e0 - (e3 << 1)); /* e0-e3 */\n        e1 += e2;\n        e2 = (e1 - (e2 << 1)); /* e1-e2 */\n        e0 += 32;\n        e1 += 32;\n        e2 += 32;\n        e3 += 32;\n#ifdef USE_PRED_BLOCK\n        e0 = pred[0] + (e0 >> 6);\n        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */\n        e1 = pred[12] + (e1 >> 6);\n        if ((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */\n        e2 = pred[24] + (e2 >> 6);\n        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */\n        e3 = pred[36] + (e3 >> 6);\n        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */\n        *cur = e0;\n        *(cur += width) = e1;\n        *(cur += width) = e2;\n        cur[width] = e3;\n        cur -= (width << 1);\n        cur++;\n        pred++;\n#else\n        OSCL_UNUSED_ARG(pred);\n\n        e0 = *cur + (e0 >> 6);\n        if ((uint)e0 > 0xFF)   e0 = 0xFF & (~(e0 >> 31));  /* clip */\n        *cur = e0;\n        e1 = *(cur += width) + (e1 >> 6);\n        if 
((uint)e1 > 0xFF)   e1 = 0xFF & (~(e1 >> 31));  /* clip */\n        *cur = e1;\n        e2 = *(cur += width) + (e2 >> 6);\n        if ((uint)e2 > 0xFF)   e2 = 0xFF & (~(e2 >> 31));  /* clip */\n        *cur = e2;\n        e3 = cur[width] + (e3 >> 6);\n        if ((uint)e3 > 0xFF)   e3 = 0xFF & (~(e3 >> 31));  /* clip */\n        cur[width] = e3;\n        cur -= (width << 1);\n        cur++;\n#endif\n        block++;\n    }\n\n    return ;\n}\n\n/* see subclause 8.5.7 */\nvoid ChromaDCTrans(int16 *block, int Qq, int Rq)\n{\n    int c00, c01, c10, c11;\n    int f0, f1, f2, f3;\n    int scale = dequant_coefres[Rq][0];\n\n    c00 = block[0] + block[4];\n    c01 = block[0] - block[4];\n    c10 = block[64] + block[68];\n    c11 = block[64] - block[68];\n\n    f0 = c00 + c10;\n    f1 = c01 + c11;\n    f2 = c00 - c10;\n    f3 = c01 - c11;\n\n    if (Qq >= 1)\n    {\n        Qq -= 1;\n        block[0] = (f0 * scale) << Qq;\n        block[4] = (f1 * scale) << Qq;\n        block[64] = (f2 * scale) << Qq;\n        block[68] = (f3 * scale) << Qq;\n    }\n    else\n    {\n        block[0] = (f0 * scale) >> 1;\n        block[4] = (f1 * scale) >> 1;\n        block[64] = (f2 * scale) >> 1;\n        block[68] = (f3 * scale) >> 1;\n    }\n\n    return ;\n}\n\n\nvoid copy_block(uint8 *pred, uint8 *cur, int width, int pred_pitch)\n{\n    uint32 temp;\n\n    temp = *((uint32*)pred);\n    pred += pred_pitch;\n    *((uint32*)cur) = temp;\n    cur += width;\n    temp = *((uint32*)pred);\n    pred += pred_pitch;\n    *((uint32*)cur) = temp;\n    cur += width;\n    temp = *((uint32*)pred);\n    pred += pred_pitch;\n    *((uint32*)cur) = temp;\n    cur += width;\n    temp = *((uint32*)pred);\n    *((uint32*)cur) = temp;\n\n    return ;\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/pred_inter.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_lib.h\"\n#include \"oscl_mem.h\"\n\n\n#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \\\n                 x = 0xFF & (~(x>>31));}\n\n/* (blkwidth << 2) + (dy << 1) + dx */\nstatic void (*const ChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =\n{\n    &ChromaFullMC_SIMD,\n    &ChromaHorizontalMC_SIMD,\n    &ChromaVerticalMC_SIMD,\n    &ChromaDiagonalMC_SIMD,\n    &ChromaFullMC_SIMD,\n    &ChromaHorizontalMC2_SIMD,\n    &ChromaVerticalMC2_SIMD,\n    &ChromaDiagonalMC2_SIMD\n};\n/* Perform motion prediction and compensation with residue if exist. 
*/\nvoid InterMBPrediction(AVCCommonObj *video)\n{\n    AVCMacroblock *currMB = video->currMB;\n    AVCPictureData *currPic = video->currPic;\n    int mbPartIdx, subMbPartIdx;\n    int ref_idx;\n    int offset_MbPart_indx = 0;\n    int16 *mv;\n    uint32 x_pos, y_pos;\n    uint8 *curL, *curCb, *curCr;\n    uint8 *ref_l, *ref_Cb, *ref_Cr;\n    uint8 *predBlock, *predCb, *predCr;\n    int block_x, block_y, offset_x, offset_y, offsetP, offset;\n    int x_position = (video->mb_x << 4);\n    int y_position = (video->mb_y << 4);\n    int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;\n    int picWidth = currPic->pitch;\n    int picHeight = currPic->height;\n    int16 *dataBlock;\n    uint32 cbp4x4;\n    uint32 tmp_word;\n\n    tmp_word = y_position * picWidth;\n    curL = currPic->Sl + tmp_word + x_position;\n    offset = (tmp_word >> 2) + (x_position >> 1);\n    curCb = currPic->Scb + offset;\n    curCr = currPic->Scr + offset;\n\n#ifdef USE_PRED_BLOCK\n    predBlock = video->pred + 84;\n    predCb = video->pred + 452;\n    predCr = video->pred + 596;\n#else\n    predBlock = curL;\n    predCb = curCb;\n    predCr = curCr;\n#endif\n\n    GetMotionVectorPredictor(video, false);\n\n    for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n    {\n        MbHeight = currMB->SubMbPartHeight[mbPartIdx];\n        MbWidth = currMB->SubMbPartWidth[mbPartIdx];\n        mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);\n        mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;\n        ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];\n        offset_indx = 0;\n\n        ref_l = video->RefPicList0[ref_idx]->Sl;\n        ref_Cb = video->RefPicList0[ref_idx]->Scb;\n        ref_Cr = video->RefPicList0[ref_idx]->Scr;\n\n        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n        {\n            block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1);  // check this\n            block_y = 
(mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);\n            mv = (int16*)(currMB->mvL0 + block_x + (block_y << 2));\n            offset_x = x_position + (block_x << 2);\n            offset_y = y_position + (block_y << 2);\n            x_pos = (offset_x << 2) + *mv++;   /*quarter pel */\n            y_pos = (offset_y << 2) + *mv;   /*quarter pel */\n\n            //offset = offset_y * currPic->width;\n            //offsetC = (offset >> 2) + (offset_x >> 1);\n#ifdef USE_PRED_BLOCK\n            offsetP = (block_y * 80) + (block_x << 2);\n            LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,\n                           /*comp_Sl + offset + offset_x,*/\n                           predBlock + offsetP, 20, MbWidth, MbHeight);\n#else\n            offsetP = (block_y << 2) * picWidth + (block_x << 2);\n            LumaMotionComp(ref_l, picWidth, picHeight, x_pos, y_pos,\n                           /*comp_Sl + offset + offset_x,*/\n                           predBlock + offsetP, picWidth, MbWidth, MbHeight);\n#endif\n\n#ifdef USE_PRED_BLOCK\n            offsetP = (block_y * 24) + (block_x << 1);\n            ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                             /*comp_Scb +  offsetC,*/\n                             predCb + offsetP, 12, MbWidth >> 1, MbHeight >> 1);\n            ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                             /*comp_Scr +  offsetC,*/\n                             predCr + offsetP, 12, MbWidth >> 1, MbHeight >> 1);\n#else\n            offsetP = (block_y * picWidth) + (block_x << 1);\n            ChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                             /*comp_Scb +  offsetC,*/\n                             predCb + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);\n            ChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                             /*comp_Scr + 
 offsetC,*/\n                             predCr + offsetP, picWidth >> 1, MbWidth >> 1, MbHeight >> 1);\n#endif\n\n            offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;\n        }\n        offset_MbPart_indx = currMB->MbPartWidth >> 4;\n    }\n\n    /* used in decoder, used to be if(!encFlag)  */\n\n    /* transform in raster scan order */\n    dataBlock = video->block;\n    cbp4x4 = video->cbp4x4;\n    /* luma */\n    for (block_y = 4; block_y > 0; block_y--)\n    {\n        for (block_x = 4; block_x > 0; block_x--)\n        {\n#ifdef USE_PRED_BLOCK\n            if (cbp4x4&1)\n            {\n                itrans(dataBlock, predBlock, predBlock, 20);\n            }\n#else\n            if (cbp4x4&1)\n            {\n                itrans(dataBlock, curL, curL, picWidth);\n            }\n#endif\n            cbp4x4 >>= 1;\n            dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n            predBlock += 4;\n#else\n            curL += 4;\n#endif\n        }\n        dataBlock += 48;\n#ifdef USE_PRED_BLOCK\n        predBlock += 64;\n#else\n        curL += ((picWidth << 2) - 16);\n#endif\n    }\n\n    /* chroma */\n    picWidth = (picWidth >> 1);\n    for (block_y = 2; block_y > 0; block_y--)\n    {\n        for (block_x = 2; block_x > 0; block_x--)\n        {\n#ifdef USE_PRED_BLOCK\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, predCb, predCb, 12);\n            }\n#else\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, curCb, curCb, picWidth);\n            }\n#endif\n            cbp4x4 >>= 1;\n            dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n            predCb += 4;\n#else\n            curCb += 4;\n#endif\n        }\n        for (block_x = 2; block_x > 0; block_x--)\n        {\n#ifdef USE_PRED_BLOCK\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, predCr, predCr, 12);\n            }\n#else\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, 
curCr, curCr, picWidth);\n            }\n#endif\n            cbp4x4 >>= 1;\n            dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n            predCr += 4;\n#else\n            curCr += 4;\n#endif\n        }\n        dataBlock += 48;\n#ifdef USE_PRED_BLOCK\n        predCb += 40;\n        predCr += 40;\n#else\n        curCb += ((picWidth << 2) - 8);\n        curCr += ((picWidth << 2) - 8);\n#endif\n    }\n\n#ifdef MB_BASED_DEBLOCK\n    SaveNeighborForIntraPred(video, offset);\n#endif\n\n    return ;\n}\n\n\n/* preform the actual  motion comp here */\nvoid LumaMotionComp(uint8 *ref, int picwidth, int picheight,\n                    int x_pos, int y_pos,\n                    uint8 *pred, int pred_pitch,\n                    int blkwidth, int blkheight)\n{\n    int dx, dy;\n    uint8 temp[24][24]; /* for padding, make the size multiple of 4 for packing */\n    int temp2[21][21]; /* for intermediate results */\n    uint8 *ref2;\n\n    dx = x_pos & 3;\n    dy = y_pos & 3;\n    x_pos = x_pos >> 2;  /* round it to full-pel resolution */\n    y_pos = y_pos >> 2;\n\n    /* perform actual motion compensation */\n    if (dx == 0 && dy == 0)\n    {  /* fullpel position *//* G */\n        if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)\n        {\n            ref += y_pos * picwidth + x_pos;\n            FullPelMC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight);\n        }\n        else\n        {\n            CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0], blkwidth, blkheight);\n            FullPelMC(&temp[0][0], 24, pred, pred_pitch, blkwidth, blkheight);\n        }\n\n    }   /* other positions */\n    else  if (dy == 0)\n    { /* no vertical interpolation *//* a,b,c*/\n\n        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos >= 0 && y_pos + blkheight <= picheight)\n        {\n            ref += y_pos * picwidth + x_pos;\n\n            HorzInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, 
blkheight, dx);\n        }\n        else  /* need padding */\n        {\n            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos, &temp[0][0], blkwidth + 5, blkheight);\n\n            HorzInterp1MC(&temp[0][2], 24, pred, pred_pitch, blkwidth, blkheight, dx);\n        }\n    }\n    else if (dx == 0)\n    { /*no horizontal interpolation *//* d,h,n */\n\n        if (x_pos >= 0 && x_pos + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)\n        {\n            ref += y_pos * picwidth + x_pos;\n\n            VertInterp1MC(ref, picwidth, pred, pred_pitch, blkwidth, blkheight, dy);\n        }\n        else  /* need padding */\n        {\n            CreatePad(ref, picwidth, picheight, x_pos, y_pos - 2, &temp[0][0], blkwidth, blkheight + 5);\n\n            VertInterp1MC(&temp[2][0], 24, pred, pred_pitch, blkwidth, blkheight, dy);\n        }\n    }\n    else if (dy == 2)\n    {  /* horizontal cross *//* i, j, k */\n\n        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)\n        {\n            ref += y_pos * picwidth + x_pos - 2; /* move to the left 2 pixels */\n\n            VertInterp2MC(ref, picwidth, &temp2[0][0], 21, blkwidth + 5, blkheight);\n\n            HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);\n        }\n        else /* need padding */\n        {\n            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);\n\n            VertInterp2MC(&temp[2][0], 24, &temp2[0][0], 21, blkwidth + 5, blkheight);\n\n            HorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);\n        }\n    }\n    else if (dx == 2)\n    { /* vertical cross */ /* f,q */\n\n        if (x_pos - 2 >= 0 && x_pos + 3 + blkwidth <= picwidth && y_pos - 2 >= 0 && y_pos + 3 + blkheight <= picheight)\n        {\n            ref += (y_pos - 2) * picwidth + x_pos; /* move to up 2 lines */\n\n     
       HorzInterp3MC(ref, picwidth, &temp2[0][0], 21, blkwidth, blkheight + 5);\n            VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);\n        }\n        else  /* need padding */\n        {\n            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5, blkheight + 5);\n            HorzInterp3MC(&temp[0][2], 24, &temp2[0][0], 21, blkwidth, blkheight + 5);\n            VertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);\n        }\n    }\n    else\n    { /* diagonal *//* e,g,p,r */\n\n        if (x_pos - 2 >= 0 && x_pos + 3 + (dx / 2) + blkwidth <= picwidth &&\n                y_pos - 2 >= 0 && y_pos + 3 + blkheight + (dy / 2) <= picheight)\n        {\n            ref2 = ref + (y_pos + (dy / 2)) * picwidth + x_pos;\n\n            ref += (y_pos * picwidth) + x_pos + (dx / 2);\n\n            DiagonalInterpMC(ref2, ref, picwidth, pred, pred_pitch, blkwidth, blkheight);\n        }\n        else  /* need padding */\n        {\n            CreatePad(ref, picwidth, picheight, x_pos - 2, y_pos - 2, &temp[0][0], blkwidth + 5 + (dx / 2), blkheight + 5 + (dy / 2));\n\n            ref2 = &temp[2 + (dy/2)][2];\n\n            ref = &temp[2][2 + (dx/2)];\n\n            DiagonalInterpMC(ref2, ref, 24, pred, pred_pitch, blkwidth, blkheight);\n        }\n    }\n\n    return ;\n}\n\nvoid CreateAlign(uint8 *ref, int picwidth, int y_pos,\n                 uint8 *out, int blkwidth, int blkheight)\n{\n    int i, j;\n    int offset, out_offset;\n    uint32 prev_pix, result, pix1, pix2, pix4;\n\n    out_offset = 24 - blkwidth;\n\n    //switch(x_pos&0x3){\n    switch (((uint32)ref)&0x3)\n    {\n        case 1:\n            ref += y_pos * picwidth;\n            offset =  picwidth - blkwidth - 3;\n            for (j = 0; j < blkheight; j++)\n            {\n                pix1 = *ref++;\n                pix2 = *((uint16*)ref);\n                ref += 2;\n                result = (pix2 << 8) | pix1;\n\n 
               for (i = 3; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n                    result = pix4 >> 8; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n        case 2:\n            ref += y_pos * picwidth;\n            offset =  picwidth - blkwidth - 2;\n            for (j = 0; j < blkheight; j++)\n            {\n                result = *((uint16*)ref);\n                ref += 2;\n                for (i = 2; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n                    result = pix4 >> 16; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n        case 3:\n            ref += y_pos * picwidth;\n            offset =  picwidth - blkwidth - 1;\n            for (j = 0; j < blkheight; j++)\n            {\n                result = *ref++;\n                for (i = 1; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n                    result = 
pix4 >> 24; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n    }\n}\n\nvoid CreatePad(uint8 *ref, int picwidth, int picheight, int x_pos, int y_pos,\n               uint8 *out, int blkwidth, int blkheight)\n{\n    int x_inc0, x_mid;\n    int y_inc, y_inc0, y_inc1, y_mid;\n    int i, j;\n    int offset;\n\n    if (x_pos < 0)\n    {\n        x_inc0 = 0;  /* increment for the first part */\n        x_mid = ((blkwidth + x_pos > 0) ? -x_pos : blkwidth);  /* stopping point */\n        x_pos = 0;\n    }\n    else if (x_pos + blkwidth > picwidth)\n    {\n        x_inc0 = 1;  /* increasing */\n        x_mid = ((picwidth > x_pos) ? picwidth - x_pos - 1 : 0);  /* clip negative to zero, encode fool proof! */\n    }\n    else    /* normal case */\n    {\n        x_inc0 = 1;\n        x_mid = blkwidth; /* just one run */\n    }\n\n\n    /* boundary for y_pos, taking the result from x_pos into account */\n    if (y_pos < 0)\n    {\n        y_inc0 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* offset depending on x_inc1 and x_inc0 */\n        y_inc1 = picwidth + y_inc0;\n        y_mid = ((blkheight + y_pos > 0) ? -y_pos : blkheight); /* clip to prevent memory corruption */\n        y_pos = 0;\n    }\n    else  if (y_pos + blkheight > picheight)\n    {\n        y_inc1 = (x_inc0 ? - x_mid : -blkwidth + x_mid); /* saturate */\n        y_inc0 = picwidth + y_inc1;                 /* increasing */\n        y_mid = ((picheight > y_pos) ? picheight - 1 - y_pos : 0);\n    }\n    else  /* normal case */\n    {\n        y_inc1 = (x_inc0 ? 
- x_mid : -blkwidth + x_mid);\n        y_inc0 = picwidth + y_inc1;\n        y_mid = blkheight;\n    }\n\n    /* clip y_pos and x_pos */\n    if (y_pos > picheight - 1) y_pos = picheight - 1;\n    if (x_pos > picwidth - 1) x_pos = picwidth - 1;\n\n    ref += y_pos * picwidth + x_pos;\n\n    y_inc = y_inc0;  /* start with top half */\n\n    offset = 24 - blkwidth; /* to use in offset out */\n    blkwidth -= x_mid; /* to use in the loop limit */\n\n    if (x_inc0 == 0)\n    {\n        for (j = 0; j < blkheight; j++)\n        {\n            if (j == y_mid)  /* put a check here to reduce the code size (for unrolling the loop) */\n            {\n                y_inc = y_inc1;  /* switch to lower half */\n            }\n            for (i = x_mid; i > 0; i--)   /* first or third quarter */\n            {\n                *out++ = *ref;\n            }\n            for (i = blkwidth; i > 0; i--)  /* second or fourth quarter */\n            {\n                *out++ = *ref++;\n            }\n            out += offset;\n            ref += y_inc;\n        }\n    }\n    else\n    {\n        for (j = 0; j < blkheight; j++)\n        {\n            if (j == y_mid)  /* put a check here to reduce the code size (for unrolling the loop) */\n            {\n                y_inc = y_inc1;  /* switch to lower half */\n            }\n            for (i = x_mid; i > 0; i--)   /* first or third quarter */\n            {\n                *out++ = *ref++;\n            }\n            for (i = blkwidth; i > 0; i--)  /* second or fourth quarter */\n            {\n                *out++ = *ref;\n            }\n            out += offset;\n            ref += y_inc;\n        }\n    }\n\n    return ;\n}\n\nvoid HorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dx)\n{\n    uint8 *p_ref;\n    uint32 *p_cur;\n    uint32 tmp, pkres;\n    int result, curr_offset, ref_offset;\n    int j;\n    int32 r0, r1, r2, r3, r4, r5;\n    int32 r13, 
r6;\n\n    p_cur = (uint32*)out; /* assume it's word aligned */\n    curr_offset = (outpitch - blkwidth) >> 2;\n    p_ref = in;\n    ref_offset = inpitch - blkwidth;\n\n    if (dx&1)\n    {\n        dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */\n        p_ref -= 2;\n        r13 = 0;\n        for (j = blkheight; j > 0; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            r0 = p_ref[0];\n            r1 = p_ref[2];\n            r0 |= (r1 << 16);           /* 0,c,0,a */\n            r1 = p_ref[1];\n            r2 = p_ref[3];\n            r1 |= (r2 << 16);           /* 0,d,0,b */\n            while ((uint32)p_ref < tmp)\n            {\n                r2 = *(p_ref += 4); /* move pointer to e */\n                r3 = p_ref[2];\n                r2 |= (r3 << 16);           /* 0,g,0,e */\n                r3 = p_ref[1];\n                r4 = p_ref[3];\n                r3 |= (r4 << 16);           /* 0,h,0,f */\n\n                r4 = r0 + r3;       /* c+h, a+f */\n                r5 = r0 + r1;   /* c+d, a+b */\n                r6 = r2 + r3;   /* g+h, e+f */\n                r5 >>= 16;\n                r5 |= (r6 << 16);   /* e+f, c+d */\n                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n                r4 += 0x100010; /* +16, +16 */\n                r5 = r1 + r2;       /* d+g, b+e */\n                r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n                r4 >>= 5;\n                r13 |= r4;      /* check clipping */\n\n                r5 = p_ref[dx+2];\n                r6 = p_ref[dx+4];\n                r5 |= (r6 << 16);\n                r4 += r5;\n                r4 += 0x10001;\n                r4 = (r4 >> 1) & 0xFF00FF;\n\n                r5 = p_ref[4];  /* i */\n                r6 = (r5 << 16);\n                r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n                r5 += r1;       /* d+i, b+g */ /* r5 not free */\n                r1 >>= 16;\n                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 
has changed */\n                r1 += r2;       /* f+g, d+e */\n                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n                r0 >>= 16;\n                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n                r0 += r3;       /* e+h, c+f */\n                r5 += 0x100010; /* 16,16 */\n                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n                r5 >>= 5;\n                r13 |= r5;      /* check clipping */\n\n                r0 = p_ref[dx+3];\n                r1 = p_ref[dx+5];\n                r0 |= (r1 << 16);\n                r5 += r0;\n                r5 += 0x10001;\n                r5 = (r5 >> 1) & 0xFF00FF;\n\n                r4 |= (r5 << 8);    /* pack them together */\n                *p_cur++ = r4;\n                r1 = r3;\n                r0 = r2;\n            }\n            p_cur += curr_offset; /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n            if (r13&0xFF000700) /* need clipping */\n            {\n                /* move back to the beginning of the line */\n                p_ref -= (ref_offset + blkwidth);   /* input */\n                p_cur -= (outpitch >> 2);\n\n                tmp = (uint32)(p_ref + blkwidth);\n                for (; (uint32)p_ref < tmp;)\n                {\n\n                    r0 = *p_ref++;\n                    r1 = *p_ref++;\n                    r2 = *p_ref++;\n                    r3 = *p_ref++;\n                    r4 = *p_ref++;\n                    /* first pixel */\n                    r5 = *p_ref++;\n                    result = (r0 + r5);\n                    r0 = (r1 + r4);\n                    result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 
pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    pkres = (result >> 1) ;\n                    /* second pixel */\n                    r0 = *p_ref++;\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result >> 1);\n                    pkres  |= (result << 8);\n                    /* third pixel */\n                    r1 = *p_ref++;\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result >> 1);\n                    pkres  |= (result << 16);\n                    /* fourth pixel */\n                    r2 = *p_ref++;\n                    result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result 
>> 1);\n                    pkres  |= (result << 24);\n                    *p_cur++ = pkres; /* write 4 pixels */\n                    p_ref -= 5;  /* offset back to the middle of filter */\n                }\n                p_cur += curr_offset;  /* move to the next line */\n                p_ref += ref_offset;    /* move to the next line */\n            }\n        }\n    }\n    else\n    {\n        p_ref -= 2;\n        r13 = 0;\n        for (j = blkheight; j > 0; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            r0 = p_ref[0];\n            r1 = p_ref[2];\n            r0 |= (r1 << 16);           /* 0,c,0,a */\n            r1 = p_ref[1];\n            r2 = p_ref[3];\n            r1 |= (r2 << 16);           /* 0,d,0,b */\n            while ((uint32)p_ref < tmp)\n            {\n                r2 = *(p_ref += 4); /* move pointer to e */\n                r3 = p_ref[2];\n                r2 |= (r3 << 16);           /* 0,g,0,e */\n                r3 = p_ref[1];\n                r4 = p_ref[3];\n                r3 |= (r4 << 16);           /* 0,h,0,f */\n\n                r4 = r0 + r3;       /* c+h, a+f */\n                r5 = r0 + r1;   /* c+d, a+b */\n                r6 = r2 + r3;   /* g+h, e+f */\n                r5 >>= 16;\n                r5 |= (r6 << 16);   /* e+f, c+d */\n                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n                r4 += 0x100010; /* +16, +16 */\n                r5 = r1 + r2;       /* d+g, b+e */\n                r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n                r4 >>= 5;\n                r13 |= r4;      /* check clipping */\n                r4 &= 0xFF00FF; /* mask */\n\n                r5 = p_ref[4];  /* i */\n                r6 = (r5 << 16);\n                r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n                r5 += r1;       /* d+i, b+g */ /* r5 not free */\n                r1 >>= 16;\n                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */\n    
            r1 += r2;       /* f+g, d+e */\n                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n                r0 >>= 16;\n                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n                r0 += r3;       /* e+h, c+f */\n                r5 += 0x100010; /* 16,16 */\n                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n                r5 >>= 5;\n                r13 |= r5;      /* check clipping */\n                r5 &= 0xFF00FF; /* mask */\n\n                r4 |= (r5 << 8);    /* pack them together */\n                *p_cur++ = r4;\n                r1 = r3;\n                r0 = r2;\n            }\n            p_cur += curr_offset; /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n            if (r13&0xFF000700) /* need clipping */\n            {\n                /* move back to the beginning of the line */\n                p_ref -= (ref_offset + blkwidth);   /* input */\n                p_cur -= (outpitch >> 2);\n\n                tmp = (uint32)(p_ref + blkwidth);\n                for (; (uint32)p_ref < tmp;)\n                {\n\n                    r0 = *p_ref++;\n                    r1 = *p_ref++;\n                    r2 = *p_ref++;\n                    r3 = *p_ref++;\n                    r4 = *p_ref++;\n                    /* first pixel */\n                    r5 = *p_ref++;\n                    result = (r0 + r5);\n                    r0 = (r1 + r4);\n                    result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  = result;\n                    /* second pixel */\n                    r0 = *p_ref++;\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    
result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 8);\n                    /* third pixel */\n                    r1 = *p_ref++;\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 16);\n                    /* fourth pixel */\n                    r2 = *p_ref++;\n                    result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 24);\n                    *p_cur++ = pkres;   /* write 4 pixels */\n                    p_ref -= 5;\n                }\n                p_cur += curr_offset; /* move to the next line */\n                p_ref += ref_offset;\n            }\n        }\n    }\n\n    return ;\n}\n\nvoid HorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dx)\n{\n    int *p_ref;\n    uint32 *p_cur;\n    uint32 tmp, pkres;\n    int result, result2, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = (uint32*)out; /* assume it's word aligned */\n    curr_offset = (outpitch - blkwidth) >> 2;\n    p_ref = in;\n    ref_offset = inpitch - 
blkwidth;\n\n    if (dx&1)\n    {\n        dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */\n\n        for (j = blkheight; j > 0 ; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            for (; (uint32)p_ref < tmp;)\n            {\n\n                r0 = p_ref[-2];\n                r1 = p_ref[-1];\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                pkres = (result >> 1);\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += 
(r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 24);\n                *p_cur++ = pkres; /* write 4 pixels */\n                p_ref -= 3;  /* offset back to the middle of filter */\n            }\n            p_cur += curr_offset;  /* move to the next line */\n            p_ref += ref_offset;    /* move to the next line */\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0 ; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            for (; (uint32)p_ref < tmp;)\n            {\n\n                r0 = p_ref[-2];\n                r1 = p_ref[-1];\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 
20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  = result;\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result << 24);\n                *p_cur++ = pkres; /* write 4 pixels */\n                p_ref -= 3;  /* offset back to the middle of filter */\n            }\n            p_cur += curr_offset;  /* move to the next line */\n            p_ref += ref_offset;    /* move to the next line */\n        }\n    }\n\n    return ;\n}\n\nvoid HorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,\n                   int blkwidth, int blkheight)\n{\n    uint8 *p_ref;\n    int   *p_cur;\n    uint32 
tmp;\n    int result, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = (outpitch - blkwidth);\n    p_ref = in;\n    ref_offset = inpitch - blkwidth;\n\n    for (j = blkheight; j > 0 ; j--)\n    {\n        tmp = (uint32)(p_ref + blkwidth);\n        for (; (uint32)p_ref < tmp;)\n        {\n\n            r0 = p_ref[-2];\n            r1 = p_ref[-1];\n            r2 = *p_ref++;\n            r3 = *p_ref++;\n            r4 = *p_ref++;\n            /* first pixel */\n            r5 = *p_ref++;\n            result = (r0 + r5);\n            r0 = (r1 + r4);\n            result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n            r0 = (r2 + r3);\n            result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n            *p_cur++ = result;\n            /* second pixel */\n            r0 = *p_ref++;\n            result = (r1 + r0);\n            r1 = (r2 + r5);\n            result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n            r1 = (r3 + r4);\n            result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n            *p_cur++ = result;\n            /* third pixel */\n            r1 = *p_ref++;\n            result = (r2 + r1);\n            r2 = (r3 + r0);\n            result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n            r2 = (r4 + r5);\n            result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n            *p_cur++ = result;\n            /* fourth pixel */\n            r2 = *p_ref++;\n            result = (r3 + r2);\n            r3 = (r4 + r1);\n            result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n            r3 = (r5 + r0);\n            result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n            *p_cur++ = result;\n            p_ref -= 3; /* move back to the middle of the filter */\n        }\n        p_cur += curr_offset; /* move to the next line */\n        p_ref += ref_offset;\n    }\n\n    return ;\n}\nvoid 
VertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dy)\n{\n    uint8 *p_cur, *p_ref;\n    uint32 tmp;\n    int result, curr_offset, ref_offset;\n    int j, i;\n    int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13;\n    uint8  tmp_in[24][24];\n\n    /* not word-aligned */\n    if (((uint32)in)&0x3)\n    {\n        CreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);\n        in = &tmp_in[2][0];\n        inpitch = 24;\n    }\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    ref_offset = blkheight * inpitch; /* for limit */\n\n    curr_offset += 3;\n\n    if (dy&1)\n    {\n        dy = (dy >> 1) ? 0 : -inpitch;\n\n        for (j = 0; j < blkwidth; j += 4, in += 4)\n        {\n            r13 = 0;\n            p_ref = in;\n            p_cur -= outpitch;  /* compensate for the first offset */\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n            {\n                r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n                p_ref += inpitch;\n                r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n                r0 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n\n                r0 += r1;\n                r6 += r7;\n\n                r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 += 20 * r1;\n                r6 += 20 * r7;\n                r0 += 0x100010;\n           
     r6 += 0x100010;\n\n                r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 -= 5 * r1;\n                r6 -= 5 * r7;\n\n                r0 >>= 5;\n                r6 >>= 5;\n                /* clip */\n                r13 |= r6;\n                r13 |= r0;\n                //CLIPPACK(r6,result)\n\n                r1 = *((uint32*)(p_ref + dy));\n                r2 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r0 += r1;\n                r6 += r2;\n                r0 += 0x10001;\n                r6 += 0x10001;\n                r0 = (r0 >> 1) & 0xFF00FF;\n                r6 = (r6 >> 1) & 0xFF00FF;\n\n                r0 |= (r6 << 8);  /* pack it back */\n                *((uint32*)(p_cur += outpitch)) = r0;\n            }\n            p_cur += curr_offset; /* offset to the next pixel */\n            if (r13 & 0xFF000700) /* this column need clipping */\n            {\n                p_cur -= 4;\n                for (i = 0; i < 4; i++)\n                {\n                    p_ref = in + i;\n                    p_cur -= outpitch;  /* compensate for the first offset */\n\n                    tmp = (uint32)(p_ref + ref_offset); /* limit */\n                    while ((uint32)p_ref < tmp)\n                    {                           /* loop un-rolled */\n                        r0 = *(p_ref - (inpitch << 1));\n                        r1 = *(p_ref - inpitch);\n                        r2 = *p_ref;\n                        r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                        r4 = *(p_ref += inpitch);\n                        /* first pixel */\n                        r5 = 
*(p_ref += inpitch);\n                        result = (r0 + r5);\n                        r0 = (r1 + r4);\n                        result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                        r0 = (r2 + r3);\n                        result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* second pixel */\n                        r0 = *(p_ref += inpitch);\n                        result = (r1 + r0);\n                        r1 = (r2 + r5);\n                        result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                        r1 = (r3 + r4);\n                        result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* third pixel */\n                        r1 = *(p_ref += inpitch);\n                        result = (r2 + r1);\n                        r2 = (r3 + r0);\n                        result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                        r2 = (r4 + r5);\n                        result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n       
                 result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* fourth pixel */\n                        r2 = *(p_ref += inpitch);\n                        result = (r3 + r2);\n                        r3 = (r4 + r1);\n                        result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                        r3 = (r5 + r0);\n                        result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                    }\n                    p_cur += (curr_offset - 3);\n                }\n            }\n        }\n    }\n    else\n    {\n        for (j = 0; j < blkwidth; j += 4, in += 4)\n        {\n            r13 = 0;\n            p_ref = in;\n            p_cur -= outpitch;  /* compensate for the first offset */\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n            {\n                r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n                p_ref += inpitch;\n                r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n                r0 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n\n                r0 += r1;\n                r6 += r7;\n\n                r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = 
*((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 += 20 * r1;\n                r6 += 20 * r7;\n                r0 += 0x100010;\n                r6 += 0x100010;\n\n                r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 -= 5 * r1;\n                r6 -= 5 * r7;\n\n                r0 >>= 5;\n                r6 >>= 5;\n                /* clip */\n                r13 |= r6;\n                r13 |= r0;\n                //CLIPPACK(r6,result)\n                r0 &= 0xFF00FF;\n                r6 &= 0xFF00FF;\n                r0 |= (r6 << 8);  /* pack it back */\n                *((uint32*)(p_cur += outpitch)) = r0;\n            }\n            p_cur += curr_offset; /* offset to the next pixel */\n            if (r13 & 0xFF000700) /* this column need clipping */\n            {\n                p_cur -= 4;\n                for (i = 0; i < 4; i++)\n                {\n                    p_ref = in + i;\n                    p_cur -= outpitch;  /* compensate for the first offset */\n                    tmp = (uint32)(p_ref + ref_offset); /* limit */\n                    while ((uint32)p_ref < tmp)\n                    {                           /* loop un-rolled */\n                        r0 = *(p_ref - (inpitch << 1));\n                        r1 = *(p_ref - inpitch);\n                        r2 = *p_ref;\n                        r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                        r4 = *(p_ref += inpitch);\n                        /* first pixel */\n           
             r5 = *(p_ref += inpitch);\n                        result = (r0 + r5);\n                        r0 = (r1 + r4);\n                        result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                        r0 = (r2 + r3);\n                        result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* second pixel */\n                        r0 = *(p_ref += inpitch);\n                        result = (r1 + r0);\n                        r1 = (r2 + r5);\n                        result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                        r1 = (r3 + r4);\n                        result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* third pixel */\n                        r1 = *(p_ref += inpitch);\n                        result = (r2 + r1);\n                        r2 = (r3 + r0);\n                        result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                        r2 = (r4 + r5);\n                        result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* fourth pixel */\n                        r2 = *(p_ref += inpitch);\n                        result = (r3 + r2);\n                        r3 = (r4 + r1);\n                        result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                        r3 = (r5 + r0);\n                        result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                        result 
= (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                    }\n                    p_cur += (curr_offset - 3);\n                }\n            }\n        }\n    }\n\n    return ;\n}\n\nvoid VertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,\n                   int blkwidth, int blkheight)\n{\n    int *p_cur;\n    uint8 *p_ref;\n    uint32 tmp;\n    int result, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    ref_offset = blkheight * inpitch; /* for limit */\n\n    for (j = 0; j < blkwidth; j++)\n    {\n        p_cur -= outpitch; /* compensate for the first offset */\n        p_ref = in++;\n\n        tmp = (uint32)(p_ref + ref_offset); /* limit */\n        while ((uint32)p_ref < tmp)\n        {                           /* loop un-rolled */\n            r0 = *(p_ref - (inpitch << 1));\n            r1 = *(p_ref - inpitch);\n            r2 = *p_ref;\n            r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n            r4 = *(p_ref += inpitch);\n            /* first pixel */\n            r5 = *(p_ref += inpitch);\n            result = (r0 + r5);\n            r0 = (r1 + r4);\n            result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n            r0 = (r2 + r3);\n            result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n            *(p_cur += outpitch) = result;\n            /* second pixel */\n            r0 = *(p_ref += inpitch);\n            result = (r1 + r0);\n            r1 = (r2 + r5);\n            result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n            r1 = (r3 + r4);\n            result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n            *(p_cur += 
outpitch) = result;\n            /* third pixel */\n            r1 = *(p_ref += inpitch);\n            result = (r2 + r1);\n            r2 = (r3 + r0);\n            result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n            r2 = (r4 + r5);\n            result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n            *(p_cur += outpitch) = result;\n            /* fourth pixel */\n            r2 = *(p_ref += inpitch);\n            result = (r3 + r2);\n            r3 = (r4 + r1);\n            result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n            r3 = (r5 + r0);\n            result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n            *(p_cur += outpitch) = result;\n            p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n        }\n        p_cur += curr_offset;\n    }\n\n    return ;\n}\n\nvoid VertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,\n                   int blkwidth, int blkheight, int dy)\n{\n    uint8 *p_cur;\n    int *p_ref;\n    uint32 tmp;\n    int result, result2, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    ref_offset = blkheight * inpitch; /* for limit */\n\n    if (dy&1)\n    {\n        dy = (dy >> 1) ? 
-(inpitch << 1) : -(inpitch << 1) - inpitch;\n\n        for (j = 0; j < blkwidth; j++)\n        {\n            p_cur -= outpitch; /* compensate for the first offset */\n            p_ref = in++;\n\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)\n            {                           /* loop un-rolled */\n                r0 = *(p_ref - (inpitch << 1));\n                r1 = *(p_ref - inpitch);\n                r2 = *p_ref;\n                r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                r4 = *(p_ref += inpitch);\n                /* first pixel */\n                r5 = *(p_ref += inpitch);\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                /* second pixel */\n                r0 = *(p_ref += inpitch);\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n 
               /* third pixel */\n                r1 = *(p_ref += inpitch);\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                /* fourth pixel */\n                r2 = *(p_ref += inpitch);\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n            }\n            p_cur += curr_offset;\n        }\n    }\n    else\n    {\n        for (j = 0; j < blkwidth; j++)\n        {\n            p_cur -= outpitch; /* compensate for the first offset */\n            p_ref = in++;\n\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)\n            {                           /* loop un-rolled */\n                r0 = *(p_ref - (inpitch << 1));\n                r1 = *(p_ref - inpitch);\n                r2 
= *p_ref;\n                r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                r4 = *(p_ref += inpitch);\n                /* first pixel */\n                r5 = *(p_ref += inpitch);\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* second pixel */\n                r0 = *(p_ref += inpitch);\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* third pixel */\n                r1 = *(p_ref += inpitch);\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* fourth pixel */\n                r2 = *(p_ref += inpitch);\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = 
result;\n                p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n            }\n            p_cur += curr_offset;\n        }\n    }\n\n    return ;\n}\n\nvoid DiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,\n                      uint8 *out, int outpitch,\n                      int blkwidth, int blkheight)\n{\n    int j, i;\n    int result;\n    uint8 *p_cur, *p_ref, *p_tmp8;\n    int curr_offset, ref_offset;\n    uint8 tmp_res[24][24], tmp_in[24][24];\n    uint32 *p_tmp;\n    uint32 tmp, pkres, tmp_result;\n    int32 r0, r1, r2, r3, r4, r5;\n    int32 r6, r7, r8, r9, r10, r13;\n    void *tmp_void;\n\n    ref_offset = inpitch - blkwidth;\n    p_ref = in1 - 2;\n    /* perform horizontal interpolation */\n    /* not word-aligned */\n    /* It is faster to read 1 byte at time to avoid calling CreateAlign */\n    /*  if(((uint32)p_ref)&0x3)\n        {\n            CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight);\n            p_ref = &tmp_in[0][0];\n            ref_offset = 24-blkwidth;\n        }*/\n\n    tmp_void = (void*) & (tmp_res[0][0]);\n    p_tmp = (uint32*) tmp_void;\n\n    for (j = blkheight; j > 0; j--)\n    {\n        r13 = 0;\n        tmp = (uint32)(p_ref + blkwidth);\n\n        //r0 = *((uint32*)p_ref);   /* d,c,b,a */\n        //r1 = (r0>>8)&0xFF00FF;    /* 0,d,0,b */\n        //r0 &= 0xFF00FF;           /* 0,c,0,a */\n        /* It is faster to read 1 byte at a time,  */\n        r0 = p_ref[0];\n        r1 = p_ref[2];\n        r0 |= (r1 << 16);           /* 0,c,0,a */\n        r1 = p_ref[1];\n        r2 = p_ref[3];\n        r1 |= (r2 << 16);           /* 0,d,0,b */\n\n        while ((uint32)p_ref < tmp)\n        {\n            //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */\n            //r3 = (r2>>8)&0xFF00FF;  /* 0,h,0,f */\n            //r2 &= 0xFF00FF;           /* 0,g,0,e */\n            /* It is faster to read 1 byte at a time,  */\n            r2 = *(p_ref += 4);\n            r3 = 
p_ref[2];\n            r2 |= (r3 << 16);           /* 0,g,0,e */\n            r3 = p_ref[1];\n            r4 = p_ref[3];\n            r3 |= (r4 << 16);           /* 0,h,0,f */\n\n            r4 = r0 + r3;       /* c+h, a+f */\n            r5 = r0 + r1;   /* c+d, a+b */\n            r6 = r2 + r3;   /* g+h, e+f */\n            r5 >>= 16;\n            r5 |= (r6 << 16);   /* e+f, c+d */\n            r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n            r4 += 0x100010; /* +16, +16 */\n            r5 = r1 + r2;       /* d+g, b+e */\n            r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n            r4 >>= 5;\n            r13 |= r4;      /* check clipping */\n            r4 &= 0xFF00FF; /* mask */\n\n            r5 = p_ref[4];  /* i */\n            r6 = (r5 << 16);\n            r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n            r5 += r1;       /* d+i, b+g */ /* r5 not free */\n            r1 >>= 16;\n            r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */\n            r1 += r2;       /* f+g, d+e */\n            r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n            r0 >>= 16;\n            r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n            r0 += r3;       /* e+h, c+f */\n            r5 += 0x100010; /* 16,16 */\n            r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n            r5 >>= 5;\n            r13 |= r5;      /* check clipping */\n            r5 &= 0xFF00FF; /* mask */\n\n            r4 |= (r5 << 8);    /* pack them together */\n            *p_tmp++ = r4;\n            r1 = r3;\n            r0 = r2;\n        }\n        p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */\n        p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n        if (r13&0xFF000700) /* need clipping */\n        {\n            /* move back to the beginning of the line */\n            p_ref -= (ref_offset + blkwidth);   /* input */\n            p_tmp -= 6; /* intermediate output */\n   
         tmp = (uint32)(p_ref + blkwidth);\n            while ((uint32)p_ref < tmp)\n            {\n                r0 = *p_ref++;\n                r1 = *p_ref++;\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres = result;\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n  
              pkres |= (result << 24);\n\n                *p_tmp++ = pkres; /* write 4 pixel */\n                p_ref -= 5;\n            }\n            p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n        }\n    }\n\n    /*  perform vertical interpolation */\n    /* not word-aligned */\n    if (((uint32)in2)&0x3)\n    {\n        CreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);\n        in2 = &tmp_in[2][0];\n        inpitch = 24;\n    }\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */\n    pkres = blkheight * inpitch; /* reuse it for limit */\n\n    curr_offset += 3;\n\n    for (j = 0; j < blkwidth; j += 4, in2 += 4)\n    {\n        r13 = 0;\n        p_ref = in2;\n        p_tmp8 = &(tmp_res[0][j]); /* intermediate result */\n        p_tmp8 -= 24;  /* compensate for the first offset */\n        p_cur -= outpitch;  /* compensate for the first offset */\n        tmp = (uint32)(p_ref + pkres); /* limit */\n        while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n        {\n            /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign,  */\n            /*p_ref8 = p_ref-(inpitch<<1);          r0 = p_ref8[0];         r1 = p_ref8[2];\n            r0 |= (r1<<16);         r6 = p_ref8[1];         r1 = p_ref8[3];\n            r6 |= (r1<<16);         p_ref+=inpitch; */\n            r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n            p_ref += inpitch;\n            r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n            r0 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref+(inpitch<<1);\n            r1 = p_ref8[0];         r7 = p_ref8[2];         r1 |= (r7<<16);\n            r7 = p_ref8[1];         r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n       
     r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n\n            r0 += r1;\n            r6 += r7;\n\n            /*r2 = p_ref[0];            r8 = p_ref[2];          r2 |= (r8<<16);\n            r8 = p_ref[1];          r1 = p_ref[3];          r8 |= (r1<<16);*/\n            r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n            r8 = (r2 >> 8) & 0xFF00FF;\n            r2 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref-inpitch;           r1 = p_ref8[0];         r7 = p_ref8[2];\n            r1 |= (r7<<16);         r1 += r2;           r7 = p_ref8[1];\n            r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n            r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n            r1 += r2;\n\n            r7 += r8;\n\n            r0 += 20 * r1;\n            r6 += 20 * r7;\n            r0 += 0x100010;\n            r6 += 0x100010;\n\n            /*p_ref8 = p_ref-(inpitch<<1);          r2 = p_ref8[0];         r8 = p_ref8[2];\n            r2 |= (r8<<16);         r8 = p_ref8[1];         r1 = p_ref8[3];         r8 |= (r1<<16);*/\n            r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n            r8 = (r2 >> 8) & 0xFF00FF;\n            r2 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref+inpitch;           r1 = p_ref8[0];         r7 = p_ref8[2];\n            r1 |= (r7<<16);         r1 += r2;           r7 = p_ref8[1];\n            r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n            r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n            r1 += r2;\n\n            r7 += r8;\n\n            r0 -= 5 * r1;\n            r6 -= 5 * r7;\n\n            r0 >>= 5;\n            r6 >>= 5;\n            /* clip */\n            r13 |= r6;\n            r13 |= r0;\n            //CLIPPACK(r6,result)\n            /* add with horizontal results */\n            r10 = *((uint32*)(p_tmp8 += 24));\n            r9 = (r10 
>> 8) & 0xFF00FF;\n            r10 &= 0xFF00FF;\n\n            r0 += r10;\n            r0 += 0x10001;\n            r0 = (r0 >> 1) & 0xFF00FF;   /* mask to 8 bytes */\n\n            r6 += r9;\n            r6 += 0x10001;\n            r6 = (r6 >> 1) & 0xFF00FF;   /* mask to 8 bytes */\n\n            r0 |= (r6 << 8);  /* pack it back */\n            *((uint32*)(p_cur += outpitch)) = r0;\n        }\n        p_cur += curr_offset; /* offset to the next pixel */\n        if (r13 & 0xFF000700) /* this column need clipping */\n        {\n            p_cur -= 4;\n            for (i = 0; i < 4; i++)\n            {\n                p_ref = in2 + i;\n                p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */\n                p_tmp8 -= 24;  /* compensate for the first offset */\n                p_cur -= outpitch;  /* compensate for the first offset */\n                tmp = (uint32)(p_ref + pkres); /* limit */\n                while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n                {\n                    r0 = *(p_ref - (inpitch << 1));\n                    r1 = *(p_ref - inpitch);\n                    r2 = *p_ref;\n                    r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                    r4 = *(p_ref += inpitch);\n                    /* first pixel */\n                    r5 = *(p_ref += inpitch);\n                    result = (r0 + r5);\n                    r0 = (r1 + r4);\n                    result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* modify pointer before loading */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n              
      /* second pixel */\n                    r0 = *(p_ref += inpitch);\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    /* third pixel */\n                    r1 = *(p_ref += inpitch);\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    /* fourth pixel */\n                    r2 = *(p_ref += inpitch);\n                    result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n            
        result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                }\n                p_cur += (curr_offset - 3);\n            }\n        }\n    }\n\n    return ;\n}\n\n/* position G */\nvoid FullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n               int blkwidth, int blkheight)\n{\n    int i, j;\n    int offset_in = inpitch - blkwidth;\n    int offset_out = outpitch - blkwidth;\n    uint32 temp;\n    uint8 byte;\n\n    if (((uint32)in)&3)\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 4)\n            {\n                temp = *in++;\n                byte = *in++;\n                temp |= (byte << 8);\n                byte = *in++;\n                temp |= (byte << 16);\n                byte = *in++;\n                temp |= (byte << 24);\n\n                *((uint32*)out) = temp; /* write 4 bytes */\n                out += 4;\n            }\n            out += offset_out;\n            in += offset_in;\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 4)\n            {\n                temp = *((uint32*)in);\n                *((uint32*)out) = temp;\n                in += 4;\n                out += 4;\n            }\n            out += offset_out;\n            in += offset_in;\n        }\n    }\n    return ;\n}\n\nvoid ChromaMotionComp(uint8 *ref, int picwidth, int picheight,\n                      int x_pos, int y_pos,\n                      uint8 *pred, int pred_pitch,\n                      int blkwidth, int blkheight)\n{\n    int dx, dy;\n    int offset_dx, offset_dy;\n    int index;\n    uint8 temp[24][24];\n\n    dx = x_pos & 7;\n    dy = y_pos & 7;\n    offset_dx = (dx + 7) >> 3;\n    offset_dy = (dy + 7) >> 3;\n    x_pos = x_pos >> 3;  /* round it to full-pel resolution 
*/\n    y_pos = y_pos >> 3;\n\n    if ((x_pos >= 0 && x_pos + blkwidth + offset_dx <= picwidth) && (y_pos >= 0 && y_pos + blkheight + offset_dy <= picheight))\n    {\n        ref += y_pos * picwidth + x_pos;\n    }\n    else\n    {\n        CreatePad(ref, picwidth, picheight, x_pos, y_pos, &temp[0][0], blkwidth + offset_dx, blkheight + offset_dy);\n        ref = &temp[0][0];\n        picwidth = 24;\n    }\n\n    index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);\n\n    (*(ChromaMC_SIMD[index]))(ref, picwidth , dx, dy, pred, pred_pitch, blkwidth, blkheight);\n    return ;\n}\n\n\n/* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done)  */\nvoid ChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                           uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    int32 r0, r1, r2, r3, result0, result1;\n    uint8 temp[288];\n    uint8 *ref, *out;\n    int i, j;\n    int dx_8 = 8 - dx;\n    int dy_8 = 8 - dy;\n\n    /* horizontal first */\n    out = temp;\n    for (i = 0; i < blkheight + 1; i++)\n    {\n        ref = pRef;\n        r0 = ref[0];\n        for (j = 0; j < blkwidth; j += 4)\n        {\n            r0 |= (ref[2] << 16);\n            result0 = dx_8 * r0;\n\n            r1 = ref[1] | (ref[3] << 16);\n            result0 += dx * r1;\n            *(int32 *)out = result0;\n\n            result0 = dx_8 * r1;\n\n            r2 = ref[4];\n            r0 = r0 >> 16;\n            r1 = r0 | (r2 << 16);\n            result0 += dx * r1;\n            *(int32 *)(out + 16) = result0;\n\n            ref += 4;\n            out += 4;\n            r0 = r2;\n        }\n        pRef += srcPitch;\n        out += (32 - blkwidth);\n    }\n\n//  pRef -= srcPitch*(blkheight+1);\n    ref = temp;\n\n    for (j = 0; j < blkwidth; j += 4)\n    {\n        r0 = *(int32 *)ref;\n        r1 = *(int32 *)(ref + 16);\n        ref += 32;\n        out = pOut;\n        for (i = 0; i < (blkheight >> 1); i++)\n 
       {\n            result0 = dy_8 * r0 + 0x00200020;\n            r2 = *(int32 *)ref;\n            result0 += dy * r2;\n            result0 >>= 6;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00200020;\n            r3 = *(int32 *)(ref + 16);\n            result1 += dy * r3;\n            result1 >>= 6;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            out += predPitch;\n            ref += 32;\n\n            result0 = dy_8 * r0 + 0x00200020;\n            r2 = *(int32 *)ref;\n            result0 += dy * r2;\n            result0 >>= 6;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00200020;\n            r3 = *(int32 *)(ref + 16);\n            result1 += dy * r3;\n            result1 >>= 6;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            out += predPitch;\n            ref += 32;\n        }\n        pOut += 4;\n        ref = temp + 4; /* since it can only iterate twice max  */\n    }\n    return;\n}\n\nvoid ChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                             uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(dy);\n    int32 r0, r1, r2, result0, result1;\n    uint8 *ref, *out;\n    int i, j;\n    int dx_8 = 8 - dx;\n\n    /* horizontal first */\n    for (i = 0; i < blkheight; i++)\n    {\n        ref = pRef;\n        out = pOut;\n\n        r0 = ref[0];\n        for (j = 0; j < blkwidth; j += 4)\n        {\n            r0 |= (ref[2] << 16);\n            result0 = dx_8 * r0 + 0x00040004;\n\n            r1 = ref[1] | (ref[3] << 16);\n            result0 += dx * r1;\n            result0 >>= 3;\n            result0 &= 0x00FF00FF;\n\n            result1 = dx_8 * r1 + 0x00040004;\n\n            r2 = ref[4];\n            r0 = r0 >> 16;\n            r1 
= r0 | (r2 << 16);\n            result1 += dx * r1;\n            result1 >>= 3;\n            result1 &= 0x00FF00FF;\n\n            *(int32 *)out = result0 | (result1 << 8);\n\n            ref += 4;\n            out += 4;\n            r0 = r2;\n        }\n\n        pRef += srcPitch;\n        pOut += predPitch;\n    }\n    return;\n}\n\nvoid ChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                           uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(dx);\n    int32 r0, r1, r2, r3, result0, result1;\n    int i, j;\n    uint8 *ref, *out;\n    int dy_8 = 8 - dy;\n    /* vertical first */\n    for (i = 0; i < blkwidth; i += 4)\n    {\n        ref = pRef;\n        out = pOut;\n\n        r0 = ref[0] | (ref[2] << 16);\n        r1 = ref[1] | (ref[3] << 16);\n        ref += srcPitch;\n        for (j = 0; j < blkheight; j++)\n        {\n            result0 = dy_8 * r0 + 0x00040004;\n            r2 = ref[0] | (ref[2] << 16);\n            result0 += dy * r2;\n            result0 >>= 3;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00040004;\n            r3 = ref[1] | (ref[3] << 16);\n            result1 += dy * r3;\n            result1 >>= 3;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            ref += srcPitch;\n            out += predPitch;\n        }\n        pOut += 4;\n        pRef += 4;\n    }\n    return;\n}\n\nvoid ChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut,  int predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(blkwidth);\n    int32 r0, r1, temp0, temp1, result;\n    int32 temp[9];\n    int32 *out;\n    int i, r_temp;\n    int dy_8 = 8 - dy;\n\n    /* horizontal first */\n    out = temp;\n    for (i = 0; i < blkheight + 1; i++)\n    {\n        r_temp = pRef[1];\n        temp0 = (pRef[0] << 3) + dx * (r_temp - 
pRef[0]);\n        temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);\n        r0 = temp0 | (temp1 << 16);\n        *out++ = r0;\n        pRef += srcPitch;\n    }\n\n    pRef -= srcPitch * (blkheight + 1);\n\n    out = temp;\n\n    r0 = *out++;\n\n    for (i = 0; i < blkheight; i++)\n    {\n        result = dy_8 * r0 + 0x00200020;\n        r1 = *out++;\n        result += dy * r1;\n        result >>= 6;\n        result &= 0x00FF00FF;\n        *(int16 *)pOut = (result >> 8) | (result & 0xFF);\n        r0 = r1;\n        pOut += predPitch;\n    }\n    return;\n}\n\nvoid ChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                              uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(dy);\n    OSCL_UNUSED_ARG(blkwidth);\n    int i, temp, temp0, temp1;\n\n    /* horizontal first */\n    for (i = 0; i < blkheight; i++)\n    {\n        temp = pRef[1];\n        temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;\n        temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;\n\n        *(int16 *)pOut = temp0 | (temp1 << 8);\n        pRef += srcPitch;\n        pOut += predPitch;\n\n    }\n    return;\n}\nvoid ChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(dx);\n    OSCL_UNUSED_ARG(blkwidth);\n    int32 r0, r1, result;\n    int i;\n    int dy_8 = 8 - dy;\n    r0 = pRef[0] | (pRef[1] << 16);\n    pRef += srcPitch;\n    for (i = 0; i < blkheight; i++)\n    {\n        result = dy_8 * r0 + 0x00040004;\n        r1 = pRef[0] | (pRef[1] << 16);\n        result += dy * r1;\n        result >>= 3;\n        result &= 0x00FF00FF;\n        *(int16 *)pOut = (result >> 8) | (result & 0xFF);\n        r0 = r1;\n        pRef += srcPitch;\n        pOut += predPitch;\n    }\n    return;\n}\n\nvoid ChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                       uint8 *pOut, int 
predPitch, int blkwidth, int blkheight)\n{\n    OSCL_UNUSED_ARG(dx);\n    OSCL_UNUSED_ARG(dy);\n    int i, j;\n    int offset_in = srcPitch - blkwidth;\n    int offset_out = predPitch - blkwidth;\n    uint16 temp;\n    uint8 byte;\n\n    if (((uint32)pRef)&1)\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 2)\n            {\n                temp = *pRef++;\n                byte = *pRef++;\n                temp |= (byte << 8);\n                *((uint16*)pOut) = temp; /* write 2 bytes */\n                pOut += 2;\n            }\n            pOut += offset_out;\n            pRef += offset_in;\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 2)\n            {\n                temp = *((uint16*)pRef);\n                *((uint16*)pOut) = temp;\n                pRef += 2;\n                pOut += 2;\n            }\n            pOut += offset_out;\n            pRef += offset_in;\n        }\n    }\n    return ;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/pred_intra.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_lib.h\"\n#include \"oscl_mem.h\"\n\n#define CLIP_COMP  *comp++ = (uint8)(((uint)temp>0xFF)? 0xFF&(~(temp>>31)): temp)\n#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \\\n                 x = 0xFF & (~(x>>31));}\n\n\n/* We should combine the Intra4x4 functions with residual decoding and compensation  */\nAVCStatus IntraMBPrediction(AVCCommonObj *video)\n{\n    int component, SubBlock_indx, temp;\n    AVCStatus status;\n    AVCMacroblock *currMB = video->currMB;\n    AVCPictureData *currPic = video->currPic;\n    uint8 *curL, *curCb, *curCr;\n    uint8 *comp;\n    int block_x, block_y, offset;\n    int16 *dataBlock = video->block;\n    uint8 *predCb, *predCr;\n#ifdef USE_PRED_BLOCK\n    uint8 *pred;\n#endif\n    int pitch = currPic->pitch;\n    uint32 cbp4x4 = video->cbp4x4;\n\n    offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);\n    curL = currPic->Sl + offset;\n\n#ifdef USE_PRED_BLOCK\n    video->pred_block = video->pred + 84;  /* point to separate prediction memory */\n    pred = video->pred_block;\n    video->pred_pitch = 20;\n#else\n    video->pred_block = curL;   /* point directly to the frame buffer */\n    video->pred_pitch = pitch;\n#endif\n\n    if 
(currMB->mbMode == AVC_I4)\n    {\n        /* luminance first */\n        block_x = block_y = 0;\n        for (component = 0; component < 4; component++)\n        {\n            block_x = ((component & 1) << 1);\n            block_y = ((component >> 1) << 1);\n            comp = curL;// + (block_x<<2) + (block_y<<2)*currPic->pitch;\n\n            for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++)\n            {\n                status = Intra_4x4(video, block_x, block_y, comp);\n                if (status != AVC_SUCCESS)\n                {\n                    return status;\n                }\n                /* transform following the 4x4 prediction, can't be SIMD\n                with other blocks. */\n#ifdef USE_PRED_BLOCK\n                if (cbp4x4&(1 << ((block_y << 2) + block_x)))\n                {\n                    itrans(dataBlock, pred, pred, 20);\n                }\n#else\n                if (cbp4x4&(1 << ((block_y << 2) + block_x)))\n                {\n                    itrans(dataBlock, comp, comp, pitch);\n                }\n#endif\n                temp = SubBlock_indx & 1;\n                if (temp)\n                {\n                    block_y++;\n                    block_x--;\n                    dataBlock += 60;\n#ifdef USE_PRED_BLOCK\n                    pred += 76;\n#else\n                    comp += ((pitch << 2) - 4);\n#endif\n                }\n                else\n                {\n                    block_x++;\n                    dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n                    pred += 4;\n#else\n                    comp += 4;\n#endif\n                }\n            }\n            if (component&1)\n            {\n#ifdef USE_PRED_BLOCK\n                pred -= 8;\n#else\n                curL += (pitch << 3) - 8;\n#endif\n                dataBlock -= 8;\n            }\n            else\n            {\n#ifdef USE_PRED_BLOCK\n                pred -= 152;\n#else\n                curL += 8;\n#endif\n              
  dataBlock -= 120;\n            }\n        }\n        cbp4x4 >>= 16;\n    }\n    else   /* AVC_I16 */\n    {\n#ifdef MB_BASED_DEBLOCK\n        video->pintra_pred_top = video->intra_pred_top + (video->mb_x << 4);\n        video->pintra_pred_left = video->intra_pred_left + 1;\n        video->intra_pred_topleft = video->intra_pred_left[0];\n        pitch = 1;\n#else\n        video->pintra_pred_top = curL - pitch;\n        video->pintra_pred_left = curL - 1;\n        if (video->mb_y)\n        {\n            video->intra_pred_topleft = *(curL - pitch - 1);\n        }\n#endif\n        switch (currMB->i16Mode)\n        {\n            case AVC_I16_Vertical:      /* Intra_16x16_Vertical */\n                /* check availability of top */\n                if (video->intraAvailB)\n                {\n                    Intra_16x16_Vertical(video);\n                }\n                else\n                {\n                    return AVC_FAIL;\n                }\n                break;\n            case AVC_I16_Horizontal:        /* Intra_16x16_Horizontal */\n                /* check availability of left */\n                if (video->intraAvailA)\n                {\n                    Intra_16x16_Horizontal(video, pitch);\n                }\n                else\n                {\n                    return AVC_FAIL;\n                }\n                break;\n            case AVC_I16_DC:        /* Intra_16x16_DC */\n                Intra_16x16_DC(video, pitch);\n                break;\n            case AVC_I16_Plane:     /* Intra_16x16_Plane */\n                if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n                {\n                    Intra_16x16_Plane(video, pitch);\n                }\n                else\n                {\n                    return AVC_FAIL;\n                }\n                break;\n            default:\n                break;\n        }\n\n        pitch = currPic->pitch;\n\n        /* transform */\n        /* can 
go in raster scan order now */\n        /* can be done in SIMD,  */\n        for (block_y = 4; block_y > 0; block_y--)\n        {\n            for (block_x = 4; block_x > 0; block_x--)\n            {\n#ifdef USE_PRED_BLOCK\n                if (cbp4x4&1)\n                {\n                    itrans(dataBlock, pred, pred, 20);\n                }\n#else\n                if (cbp4x4&1)\n                {\n                    itrans(dataBlock, curL, curL, pitch);\n                }\n#endif\n                cbp4x4 >>= 1;\n                dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n                pred += 4;\n#else\n                curL += 4;\n#endif\n            }\n            dataBlock += 48;\n#ifdef USE_PRED_BLOCK\n            pred += 64;\n#else\n            curL += ((pitch << 2) - 16);\n#endif\n        }\n    }\n\n    offset = (offset >> 2) + (video->mb_x << 2); //((video->mb_y << 3)* pitch + (video->mb_x << 3));\n    curCb = currPic->Scb + offset;\n    curCr = currPic->Scr + offset;\n\n#ifdef MB_BASED_DEBLOCK\n    video->pintra_pred_top_cb = video->intra_pred_top_cb + (video->mb_x << 3);\n    video->pintra_pred_left_cb = video->intra_pred_left_cb + 1;\n    video->intra_pred_topleft_cb = video->intra_pred_left_cb[0];\n    video->pintra_pred_top_cr = video->intra_pred_top_cr + (video->mb_x << 3);\n    video->pintra_pred_left_cr = video->intra_pred_left_cr + 1;\n    video->intra_pred_topleft_cr = video->intra_pred_left_cr[0];\n    pitch  = 1;\n#else\n    pitch >>= 1;\n    video->pintra_pred_top_cb = curCb - pitch;\n    video->pintra_pred_left_cb = curCb - 1;\n    video->pintra_pred_top_cr = curCr - pitch;\n    video->pintra_pred_left_cr = curCr - 1;\n\n    if (video->mb_y)\n    {\n        video->intra_pred_topleft_cb = *(curCb - pitch - 1);\n        video->intra_pred_topleft_cr = *(curCr - pitch - 1);\n    }\n#endif\n\n#ifdef USE_PRED_BLOCK\n    predCb = video->pred + 452;\n    predCr = predCb + 144;\n    video->pred_pitch = 12;\n#else\n    predCb = curCb;\n    predCr = 
curCr;\n    video->pred_pitch = currPic->pitch >> 1;\n#endif\n    /* chrominance */\n    switch (currMB->intra_chroma_pred_mode)\n    {\n        case AVC_IC_DC:     /* Intra_Chroma_DC */\n            Intra_Chroma_DC(video, pitch, predCb, predCr);\n            break;\n        case AVC_IC_Horizontal:     /* Intra_Chroma_Horizontal */\n            if (video->intraAvailA)\n            {\n                /* check availability of left */\n                Intra_Chroma_Horizontal(video, pitch, predCb, predCr);\n            }\n            else\n            {\n                return AVC_FAIL;\n            }\n            break;\n        case AVC_IC_Vertical:       /* Intra_Chroma_Vertical */\n            if (video->intraAvailB)\n            {\n                /* check availability of top */\n                Intra_Chroma_Vertical(video, predCb, predCr);\n            }\n            else\n            {\n                return AVC_FAIL;\n            }\n            break;\n        case AVC_IC_Plane:      /* Intra_Chroma_Plane */\n            if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n            {\n                /* check availability of top and left */\n                Intra_Chroma_Plane(video, pitch, predCb, predCr);\n            }\n            else\n            {\n                return AVC_FAIL;\n            }\n            break;\n        default:\n            break;\n    }\n\n    /* transform, done in raster scan manner */\n    pitch = currPic->pitch >> 1;\n\n    for (block_y = 2; block_y > 0; block_y--)\n    {\n        for (block_x = 2; block_x > 0; block_x--)\n        {\n#ifdef USE_PRED_BLOCK\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, predCb, predCb, 12);\n            }\n#else\n            if (cbp4x4&1)\n            {\n                ictrans(dataBlock, curCb, curCb, pitch);\n            }\n#endif\n            cbp4x4 >>= 1;\n            dataBlock += 4;\n#ifdef USE_PRED_BLOCK\n            predCb += 4;\n#else\n     
       curCb += 4;
#endif
        }
        for (block_x = 2; block_x > 0; block_x--)
        {
#ifdef USE_PRED_BLOCK
            if (cbp4x4&1)
            {
                ictrans(dataBlock, predCr, predCr, 12);
            }
#else
            if (cbp4x4&1)
            {
                ictrans(dataBlock, curCr, curCr, pitch);
            }
#endif
            cbp4x4 >>= 1;
            dataBlock += 4;
#ifdef USE_PRED_BLOCK
            predCr += 4;
#else
            curCr += 4;
#endif
        }
        dataBlock += 48;
#ifdef USE_PRED_BLOCK
        predCb += 40;
        predCr += 40;
#else
        curCb += ((pitch << 2) - 8);
        curCr += ((pitch << 2) - 8);
#endif
    }

#ifdef MB_BASED_DEBLOCK
    SaveNeighborForIntraPred(video, offset);
#endif
    return AVC_SUCCESS;
}

#ifdef MB_BASED_DEBLOCK
/*
 * Stash the neighbor pixels that intra prediction of later macroblocks will
 * need, before in-place deblocking overwrites them in the picture buffers:
 *   - bottom row of this MB (16 Y, 8 Cb, 8 Cr) -> video->intra_pred_top / _cb / _cr
 *   - right-most column (Y, Cb, Cr)            -> video->intra_pred_left / _cb / _cr
 *   - corner pixels                            -> video->intra_pred_topleft / _cb / _cr
 * offset: caller-supplied offset of the current MB within the picture planes.
 * Left columns are packed 4 bytes per 32-bit store, LSB first (little-endian
 * layout assumed -- NOTE(review): confirm for the target platform).
 */
void SaveNeighborForIntraPred(AVCCommonObj *video, int offset)
{
    AVCPictureData *currPic = video->currPic;
    int pitch;
    uint8 *pred, *predCb, *predCr;
    uint8 *tmp_ptr, tmp_byte;
    uint32 tmp_word;
    int mb_x = video->mb_x;

    /* save the value for intra prediction  */
#ifdef USE_PRED_BLOCK
    pitch = 20;
    pred = video->pred + 384; /* bottom line for Y */
    predCb = pred + 152;    /* bottom line for Cb */
    predCr = predCb + 144;  /* bottom line for Cr */
#else
    pitch = currPic->pitch;
    tmp_word = offset + (pitch << 2) - (pitch >> 1);
    predCb = currPic->Scb + tmp_word;/* bottom line for Cb */
    predCr = currPic->Scr + tmp_word;/* bottom line for Cr */

    offset = (offset << 2) - (mb_x << 4);
    pred = currPic->Sl + offset + (pitch << 4) - pitch;/* bottom line for Y */

#endif

    /* latch the last pixel of the current top-neighbor row as the new
       top-left neighbor before the row is overwritten below */
    video->intra_pred_topleft = video->intra_pred_top[(mb_x<<4)+15];
    video->intra_pred_topleft_cb = video->intra_pred_top_cb[(mb_x<<3)+7];
    video->intra_pred_topleft_cr = video->intra_pred_top_cr[(mb_x<<3)+7];

    /* then copy to video->intra_pred_top, intra_pred_top_cb, intra_pred_top_cr */
    /*oscl_memcpy(video->intra_pred_top + (mb_x<<4), pred, 16);
    oscl_memcpy(video->intra_pred_top_cb + (mb_x<<3), predCb, 8);
    oscl_memcpy(video->intra_pred_top_cr + (mb_x<<3), predCr, 8);*/
    /* word-at-a-time copy of the bottom row (16 Y + 8 Cb + 8 Cr bytes);
       NOTE(review): assumes the source rows are 4-byte aligned */
    tmp_ptr = video->intra_pred_top + (mb_x << 4);
    *((uint32*)tmp_ptr) = *((uint32*)pred);
    *((uint32*)(tmp_ptr + 4)) = *((uint32*)(pred + 4));
    *((uint32*)(tmp_ptr + 8)) = *((uint32*)(pred + 8));
    *((uint32*)(tmp_ptr + 12)) = *((uint32*)(pred + 12));
    tmp_ptr = video->intra_pred_top_cb + (mb_x << 3);
    *((uint32*)tmp_ptr) = *((uint32*)predCb);
    *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCb + 4));
    tmp_ptr = video->intra_pred_top_cr + (mb_x << 3);
    *((uint32*)tmp_ptr) = *((uint32*)predCr);
    *((uint32*)(tmp_ptr + 4)) = *((uint32*)(predCr + 4));


    /* now save last column */
#ifdef USE_PRED_BLOCK
    pred = video->pred + 99;    /* last column*/
#else
    pred -= ((pitch << 4) - pitch - 15);    /* last column */
#endif
    /* pack topleft + 16 right-column Y pixels into intra_pred_left,
       one byte at a time, 4 bytes per 32-bit store */
    tmp_ptr = video->intra_pred_left;
    tmp_word = video->intra_pred_topleft;
    tmp_byte = *(pred);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)tmp_ptr) = tmp_word;
    tmp_word = *(pred += pitch);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    tmp_word = *(pred += pitch);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    tmp_word = *(pred += pitch);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(pred += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(pred += pitch);

    /* now for Cb */
#ifdef USE_PRED_BLOCK
    predCb = video->pred + 459;
    pitch = 12;
#else
    pitch >>= 1;
    predCb -= (7 * pitch - 7);
#endif
    tmp_ptr = video->intra_pred_left_cb;
    tmp_word = video->intra_pred_topleft_cb;
    tmp_byte = *(predCb);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)tmp_ptr) = tmp_word;
    tmp_word = *(predCb += pitch);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCb += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(predCb += pitch);

    /* now for Cr */
#ifdef USE_PRED_BLOCK
    predCr = video->pred + 603;
#else
    predCr -= (7 * pitch - 7);
#endif
    tmp_ptr = video->intra_pred_left_cr;
    tmp_word = video->intra_pred_topleft_cr;
    tmp_byte = *(predCr);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)tmp_ptr) = tmp_word;
    tmp_word = *(predCr += pitch);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 8);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 16);
    tmp_byte = *(predCr += pitch);
    tmp_word |= (tmp_byte << 24);
    *((uint32*)(tmp_ptr += 4)) = tmp_word;
    *(tmp_ptr += 4) = *(predCr += pitch);

    return ;
}
#endif /* MB_BASED_DEBLOCK */

/*
 * Intra-predict one 4x4 luma block (block_x, block_y in 0..3) of the current
 * macroblock: point video->pintra_pred_top / pintra_pred_left /
 * intra_pred_topleft at the correct neighbor pixels, then dispatch on the
 * block's prediction mode (currMB->i4Mode).
 * comp: top-left pixel of this 4x4 block in the reconstructed plane (used
 *       for neighbor addressing in the non-MB_BASED_DEBLOCK path).
 * Returns AVC_FAIL when the chosen mode requires neighbors that are not
 * available (corrupt stream), otherwise AVC_SUCCESS.
 */
AVCStatus Intra_4x4(AVCCommonObj *video, int block_x, int block_y, uint8 *comp)
{
    AVCMacroblock *currMB = video->currMB;
    int block_offset;
    AVCNeighborAvailability availability;
    int pitch = video->currPic->pitch;

#ifdef USE_PRED_BLOCK
    block_offset = (block_y * 80) + (block_x << 2);
#else
    block_offset = (block_y << 2) * pitch + (block_x << 2);
#endif

#ifdef MB_BASED_DEBLOCK
    /* boundary blocks use video->pred_intra_top, pred_intra_left, pred_intra_topleft */
    if (!block_x)
    {
        video->pintra_pred_left = video->intra_pred_left + 1 + (block_y << 2);
        pitch = 1;
    }
    else
    {
        video->pintra_pred_left = video->pred_block + block_offset - 1;
        pitch = video->pred_pitch;
    }

    if (!block_y)
    {
        video->pintra_pred_top = video->intra_pred_top + (block_x << 2) + (video->mb_x << 4);
    }
    else
    {
        video->pintra_pred_top = video->pred_block + block_offset - video->pred_pitch;
    }

    if (!block_x)
    {
        video->intra_pred_topleft = video->intra_pred_left[block_y<<2];
    }
    else if (!block_y)
    {
        video->intra_pred_topleft = video->intra_pred_top[(video->mb_x<<4)+(block_x<<2)-1];
    }
    else
    {
        video->intra_pred_topleft = video->pred_block[block_offset - video->pred_pitch - 1];
    }

#else
    /* normal case */
    video->pintra_pred_top = comp - pitch;
    video->pintra_pred_left = comp - 1;
    if (video->mb_y || block_y)
    {
        video->intra_pred_topleft = *(comp - pitch - 1);
    }
#endif

    switch (currMB->i4Mode[(block_y << 2) + block_x])
    {
        case AVC_I4_Vertical:       /* Intra_4x4_Vertical */
            if (block_y > 0 || video->intraAvailB)/* to prevent out-of-bound access*/
            {
                Intra_4x4_Vertical(video,  block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Horizontal:     /* Intra_4x4_Horizontal */
            if (block_x || video->intraAvailA)  /* to prevent out-of-bound access */
            {
                Intra_4x4_Horizontal(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_DC:     /* Intra_4x4_DC */
            availability.left = TRUE;
            availability.top = TRUE;
            if (!block_y)
            { /* check availability up */
                availability.top = video->intraAvailB ;
            }
            if (!block_x)
            { /* check availability left */
                availability.left = video->intraAvailA ;
            }
            Intra_4x4_DC(video, pitch, block_offset, &availability);
            break;

        case AVC_I4_Diagonal_Down_Left:     /* Intra_4x4_Diagonal_Down_Left */
            /* lookup table will be more appropriate for this case  */
            if (block_y == 0 && !video->intraAvailB)
            {
                return AVC_FAIL;
            }

            availability.top_right = BlkTopRight[(block_y<<2) + block_x];

            if (availability.top_right == 2)
            {
                availability.top_right = video->intraAvailB;
            }
            else if (availability.top_right == 3)
            {
                availability.top_right = video->intraAvailC;
            }

            Intra_4x4_Down_Left(video, block_offset, &availability);
            break;

        case AVC_I4_Diagonal_Down_Right:        /* Intra_4x4_Diagonal_Down_Right */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Down_Right(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Vertical_Right:     /* Intra_4x4_Vertical_Right */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Vertical_Right(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Horizontal_Down:        /* Intra_4x4_Horizontal_Down */
            if ((block_y && block_x)  /* to prevent out-of-bound access */
                    || (block_y && video->intraAvailA)
                    || (block_x && video->intraAvailB)
                    || (video->intraAvailA && video->intraAvailD && video->intraAvailB))
            {
                Intra_4x4_Diagonal_Horizontal_Down(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;

        case AVC_I4_Vertical_Left:      /* Intra_4x4_Vertical_Left */
            /* lookup table may be more appropriate for this case  */
            if (block_y == 0 && !video->intraAvailB)
            {
                return AVC_FAIL;
            }

            availability.top_right = BlkTopRight[(block_y<<2) + block_x];

            if (availability.top_right == 2)
            {
                availability.top_right = video->intraAvailB;
            }
            else if (availability.top_right == 3)
            {
                availability.top_right = video->intraAvailC;
            }

            Intra_4x4_Vertical_Left(video,  block_offset, &availability);
            break;

        case AVC_I4_Horizontal_Up:      /* Intra_4x4_Horizontal_Up */
            if (block_x || video->intraAvailA)
            {
                Intra_4x4_Horizontal_Up(video, pitch, block_offset);
            }
            else
            {
                return AVC_FAIL;
            }
            break;


        default:

            break;
    }

    return AVC_SUCCESS;
}


/* =============================== BEGIN 4x4
MODES======================================*/
/*
 * Vertical mode: copy the 4 pixels above the block into all 4 rows of the
 * prediction block (one 32-bit load, four 32-bit stores).
 * NOTE(review): the word load assumes pintra_pred_top is 4-byte aligned.
 */
void Intra_4x4_Vertical(AVCCommonObj *video,  int block_offset)
{
    uint8 *comp_ref = video->pintra_pred_top;
    uint32 temp;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    /*P = (int) *comp_ref++;
    Q = (int) *comp_ref++;
    R = (int) *comp_ref++;
    S = (int) *comp_ref++;
    temp = S|(R<<8)|(Q<<16)|(P<<24);*/
    temp = *((uint32*)comp_ref);

    *((uint32*)pred) =  temp; /* write 4 at a time */
    pred += pred_pitch;
    *((uint32*)pred) =  temp;
    pred += pred_pitch;
    *((uint32*)pred) =  temp;
    pred += pred_pitch;
    *((uint32*)pred) =  temp;

    return ;
}

/*
 * Horizontal mode: replicate each of the 4 left-neighbor pixels across its
 * row. pitch is the stride between consecutive left-neighbor samples.
 * Bytes are packed LSB first (little-endian layout assumed).
 */
void Intra_4x4_Horizontal(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8   *comp_ref = video->pintra_pred_left;
    uint32 temp;
    int P;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;
    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;
    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    comp_ref += pitch;
    P = *comp_ref;
    temp = P | (P << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;

    return ;
}

/*
 * DC mode: fill the 4x4 block with the rounded average of the available
 * top and/or left neighbors (128 when neither side is available).
 */
void Intra_4x4_DC(AVCCommonObj *video, int pitch, int 
block_offset,
                  AVCNeighborAvailability *availability)
{
    uint8   *comp_ref = video->pintra_pred_left;
    uint32  temp;
    int DC;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    if (availability->left)
    {
        DC = *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref += pitch;
        DC += *comp_ref;
        comp_ref = video->pintra_pred_top;

        if (availability->top)
        {
            DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + DC + 4) >> 3;
        }
        else
        {
            DC = (DC + 2) >> 2;

        }
    }
    else if (availability->top)
    {
        comp_ref = video->pintra_pred_top;
        DC = (comp_ref[0] + comp_ref[1] + comp_ref[2] + comp_ref[3] + 2) >> 2;

    }
    else
    {
        DC = 128;
    }

    /* replicate the DC byte across one word and write all 4 rows */
    temp = DC | (DC << 8);
    temp = temp | (temp << 16);
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;
    pred += pred_pitch;
    *((uint32*)pred) = temp;

    return ;
}

/*
 * Diagonal Down-Left mode: 3-tap filtered diagonals built from the 8 pixels
 * above / above-right (r0..r7); when the above-right 4 are unavailable they
 * are replaced by the last top pixel (r3). Each packed output row below the
 * first is the previous row shifted one byte with the next diagonal value
 * inserted at the top.
 */
void Intra_4x4_Down_Left(AVCCommonObj *video, int block_offset,
                         AVCNeighborAvailability *availability)
{
    uint8   *comp_refx = video->pintra_pred_top;
    uint32 temp;
    int r0, r1, r2, r3, r4, r5, r6, r7;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    r0 = *comp_refx++;
    r1 = *comp_refx++;
    r2 = *comp_refx++;
    r3 = *comp_refx++;
    if (availability->top_right)
    {
        r4 = *comp_refx++;
        r5 = *comp_refx++;
        r6 = *comp_refx++;
        r7 = *comp_refx++;
    }
    else
    {
        r4 = r3;
        r5 = r3;
        r6 = r3;
        r7 = r3;
    }

    /* in-place (1,2,1)/4 filtering: r[i] = (r[i] + 2*r[i+1] + r[i+2] + 2) >> 2 */
    r0 += (r1 << 1);
    r0 += r2;
    r0 += 2;
    r0 >>= 2;
    r1 += (r2 << 1);
    r1 += r3;
    r1 += 2;
    r1 >>= 2;
    r2 += (r3 << 1);
    r2 += r4;
    r2 += 2;
    r2 >>= 2;
    r3 += (r4 << 1);
    r3 += r5;
    r3 += 2;
    r3 >>= 2;
    r4 += (r5 << 1);
    r4 += r6;
    r4 += 2;
    r4 >>= 2;
    r5 += (r6 << 1);
    r5 += r7;
    r5 += 2;
    r5 >>= 2;
    r6 += (3 * r7);
    r6 += 2;
    r6 >>= 2;

    temp = r0 | (r1 << 8);
    temp |= (r2 << 16);
    temp |= (r3 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = (temp >> 8) | (r4 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = (temp >> 8) | (r5 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = (temp >> 8) | (r6 << 24);
    *((uint32*)pred) = temp;

    return ;
}

/*
 * Diagonal Down-Right mode: 3-tap filtered values of the top (P_x..R_x),
 * top-left (D) and left (P_y..R_y) neighbors; every output row is the row
 * above shifted one pixel to the right (see inline column comments).
 */
void Intra_4x4_Diagonal_Down_Right(AVCCommonObj *video, int pitch, int
                                   block_offset)
{
    uint8 *comp_refx = video->pintra_pred_top;
    uint8 *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P_x, Q_x, R_x, P_y, Q_y, R_y, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    temp = *((uint32*)comp_refx); /* read 4 bytes */
    x0 = temp & 0xFF;
    x1 = (temp >> 8) & 0xFF;
    x2 = (temp >> 16) & 0xFF;

    Q_x = (x0 + 2 * x1 + x2 + 2) >> 2;
    R_x = (x1 + 2 * x2 + (temp >> 24) + 2) >> 2;

    x2 = video->intra_pred_topleft; /* re-use x2 instead of y0 */
    P_x = (x2 + 2 * x0 + x1 + 2) >> 2;

    x1 = *comp_refy;
    comp_refy += pitch; /* re-use x1 instead of y1 */
    D = (x0 + 2 * x2 + x1 + 2) >> 2;

    x0 = *comp_refy;
    comp_refy += pitch; /* re-use x0 instead of y2 */
    P_y = (x2 + 2 * x1 + x0 + 2) >> 2;

    x2 = *comp_refy;
    comp_refy += pitch; /* re-use x2 instead of y3 */
    Q_y = (x1 + 2 * x0 + x2 + 2) >> 2;

    x1 = *comp_refy;                    /* re-use x1 instead of y4 */
    R_y = (x0 + 2 * x2 + x1 + 2) >> 2;

    /* we can pack these  */
    temp =  D | (P_x << 8);   //[D   P_x Q_x R_x]
    //[P_y D   P_x Q_x]
    temp |= (Q_x << 16); //[Q_y P_y D   P_x]
    temp |= (R_x << 24);  //[R_y Q_y P_y D  ]
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp =  P_y | (D << 8);
    temp |= (P_x << 16);
    temp |= (Q_x << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp =  Q_y | (P_y << 8);
    temp |= (D << 16);
    temp |= (P_x << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = R_y | (Q_y << 8);
    temp |= (P_y << 16);
    temp |= (D << 24);
    *((uint32*)pred) = temp;

    return ;
}

/*
 * Vertical-Right mode: 2-tap averages (P0..S0, built as sum+1 then >>1) and
 * 3-tap values (D, P1..R1, P2, Q2) of the top/top-left/left neighbors,
 * written in the interleaved row pattern shown in the inline comments.
 */
void    Intra_4x4_Diagonal_Vertical_Right(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8   *comp_refx = video->pintra_pred_top;
    uint8   *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    Q0 = x0 + x1 + 1;

    x2 = *comp_refx++;
    R0 = x1 + x2 + 1;

    x1 = *comp_refx++; /* reuse x1 instead of x3 */
    S0 = x2 + x1 + 1;

    x1 = video->intra_pred_topleft; /* reuse x1 instead of y0 */
    P0 = x1 + x0 + 1;

    x2 = *comp_refy;
    comp_refy += pitch; /* reuse x2 instead of y1 */
    D = (x2 + 2 * x1 + x0 + 2) >> 2;

    /* 3-tap rows derived from adjacent 2-tap sums before they are halved */
    P1 = (P0 + Q0) >> 2;
    Q1 = (Q0 + R0) >> 2;
    R1 = (R0 + S0) >> 2;

    P0 >>= 1;
    Q0 >>= 1;
    R0 >>= 1;
    S0 >>= 1;

    x0 = *comp_refy;
    comp_refy += pitch; /* reuse x0 instead of y2 */
    P2 = (x1 + 2 * x2 + x0 + 2) >> 2;
    x1 = *comp_refy;
    comp_refy += pitch; /* reuse x1 instead of y3 */
    Q2 = (x2 + 2 * x0 + x1 + 2) >> 2;

    temp =  P0 | (Q0 << 8);  //[P0 Q0 R0 S0]
    //[D  P1 Q1 R1]
    temp |= (R0 << 16); //[P2 P0 Q0 R0]
    temp |= (S0 << 24); //[Q2 D  P1 Q1]
    *((uint32*)pred) =  temp;
    pred += pred_pitch;

    temp =  D | (P1 << 8);
    temp |= (Q1 << 16);
    temp |= (R1 << 24);
    *((uint32*)pred) =  temp;
    pred += pred_pitch;

    temp = P2 | (P0 << 8);
    temp |= (Q0 << 16);
    temp |= (R0 << 24);
    *((uint32*)pred) =  temp;
    pred += pred_pitch;

    temp = Q2 | (D << 8);
    temp |= (P1 << 16);
    temp |= (Q1 << 24);
    *((uint32*)pred) =  temp;

    return ;
}

/*
 * Horizontal-Down mode: mirror image of Vertical-Right -- 2-tap averages
 * (P0..S0) of the left column and 3-tap values (D, P1..R1, P2, Q2) of the
 * top/top-left neighbors, packed per the inline row comments.
 */
void Intra_4x4_Diagonal_Horizontal_Down(AVCCommonObj *video, int pitch,
                                        int block_offset)
{
    uint8   *comp_refx = video->pintra_pred_top;
    uint8   *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2, D;
    int x0, x1, x2;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    x2 = *comp_refx++;
    Q2 = (x0 + 2 * x1 + x2 + 2) >> 2;

    x2 = video->intra_pred_topleft; /* reuse x2 instead of y0 */
    P2 = (x2 + 2 * x0 + x1 + 2) >> 2;

    x1 = *comp_refy;
    comp_refy += pitch; /* reuse x1 instead of y1 */
    D = (x1 + 2 * x2 + x0 + 2) >> 2;
    P0 = x2 + x1 + 1;

    x0 = *comp_refy;
    comp_refy += pitch; /* reuse x0 instead of y2 */
    Q0 = x1 + x0 + 1;

    x1 = *comp_refy;
    comp_refy += pitch; /* reuse x1 instead of y3 */
    R0 = x0 + x1 + 1;

    x2 = *comp_refy;    /* reuse x2 instead of y4 */
    S0 = x1 + x2 + 1;

    P1 = (P0 + Q0) >> 2;
    Q1 = (Q0 + R0) >> 2;
    R1 = (R0 + S0) >> 2;

    P0 >>= 1;
    Q0 >>= 1;
    R0 >>= 1;
    S0 >>= 1;


    /* we can pack these  */
    temp = P0 | (D << 8);   //[P0 D  P2 Q2]
    //[Q0 P1 P0 D ]
    temp |= (P2 << 16);  //[R0 Q1 Q0 P1]
    temp |= (Q2 << 24); //[S0 R1 R0 Q1]
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = Q0 | (P1 << 8);
    temp |= (P0 << 16);
    temp |= (D << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = R0 | (Q1 << 8);
    temp |= (Q0 << 16);
    temp |= (P1 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = S0 | (R1 << 8);
    temp |= (R0 << 16);
    temp |= (Q1 << 24);
    *((uint32*)pred) = temp;

    return ;
}

/*
 * Vertical-Left mode: 2-tap (temp1 rows) and 3-tap (temp2 rows) filters of
 * the top / top-right neighbors x0..x6; unavailable top-right pixels are
 * replaced by the last top pixel (x3). Lower rows reuse the packed upper
 * rows shifted one byte.
 */
void Intra_4x4_Vertical_Left(AVCCommonObj *video, int block_offset, AVCNeighborAvailability *availability)
{
    uint8   *comp_refx = video->pintra_pred_top;
    uint32 temp1, temp2;
    int x0, x1, x2, x3, x4, x5, x6;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = video->pred_pitch;

    x0 = *comp_refx++;
    x1 = *comp_refx++;
    x2 = *comp_refx++;
    x3 = *comp_refx++;
    if (availability->top_right)
    {
        x4 = *comp_refx++;
        x5 = *comp_refx++;
        x6 = *comp_refx++;
    }
    else
    {
        x4 = x3;
        x5 = x3;
        x6 = x3;
    }

    /* x[i] becomes the pairwise sum x[i] + x[i+1] + 1 */
    x0 += x1 + 1;
    x1 += x2 + 1;
    x2 += x3 + 1;
    x3 += x4 + 1;
    x4 += x5 + 1;
    x5 += x6 + 1;

    temp1 = (x0 >> 1);
    temp1 |= ((x1 >> 1) << 8);
    temp1 |= ((x2 >> 1) << 16);
    temp1 |= ((x3 >> 1) << 24);

    *((uint32*)pred) = temp1;
    pred += pred_pitch;

    temp2 = ((x0 + x1) >> 2);
    temp2 |= (((x1 + x2) >> 2) << 8);
    temp2 |= (((x2 + x3) >> 2) << 16);
    temp2 |= (((x3 + x4) >> 2) << 24);

    *((uint32*)pred) = temp2;
    pred += pred_pitch;

    temp1 = (temp1 >> 8) | ((x4 >> 1) << 24);   /* rotate out old value */
    *((uint32*)pred) = temp1;
    pred += pred_pitch;

    temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */
    *((uint32*)pred) = temp2;
    pred += pred_pitch;

    return ;
}

/*
 * Horizontal-Up mode: 2-tap/3-tap filters of the left neighbors y0..y3;
 * positions past the last filtered value are padded with the bottom-left
 * pixel (D1 = y3).
 */
void Intra_4x4_Horizontal_Up(AVCCommonObj *video, int pitch, int block_offset)
{
    uint8   *comp_refy = video->pintra_pred_left;
    uint32 temp;
    int Q0, R0, Q1, D0, D1, P0, P1;
    int y0, y1, y2, y3;
    uint8 *pred = video->pred_block + block_offset;
    int pred_pitch = 
video->pred_pitch;

    y0 = *comp_refy;
    comp_refy += pitch;
    y1 = *comp_refy;
    comp_refy += pitch;
    y2 = *comp_refy;
    comp_refy += pitch;
    y3 = *comp_refy;

    Q0 = (y1 + y2 + 1) >> 1;
    Q1 = (y1 + (y2 << 1) + y3 + 2) >> 2;
    P0 = ((y0 + y1 + 1) >> 1);
    P1 = ((y0 + (y1 << 1) + y2 + 2) >> 2);

    temp = P0 | (P1 << 8);      // [P0 P1 Q0 Q1]
    temp |= (Q0 << 16);     // [Q0 Q1 R0 D0]
    temp |= (Q1 << 24);     // [R0 D0 D1 D1]
    *((uint32*)pred) = temp;      // [D1 D1 D1 D1]
    pred += pred_pitch;

    D0 = (y2 + 3 * y3 + 2) >> 2;
    R0 = (y2 + y3 + 1) >> 1;

    temp = Q0 | (Q1 << 8);
    temp |= (R0 << 16);
    temp |= (D0 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    D1 = y3;

    temp = R0 | (D0 << 8);
    temp |= (D1 << 16);
    temp |= (D1 << 24);
    *((uint32*)pred) = temp;
    pred += pred_pitch;

    temp = D1 | (D1 << 8);
    temp |= (temp << 16);
    *((uint32*)pred) = temp;

    return ;
}
/* =============================== END 4x4 MODES======================================*/
/*
 * 16x16 vertical mode: copy the 16 pixels above the MB into all 16 rows
 * (4 words cached in locals, 4 stores per row).
 */
void  Intra_16x16_Vertical(AVCCommonObj *video)
{
    int i;
    uint32 temp1, temp2, temp3, temp4;
    uint8   *comp_ref = video->pintra_pred_top;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    temp1 = *((uint32*)comp_ref);
    comp_ref += 4;

    temp2 = *((uint32*)comp_ref);
    comp_ref += 4;

    temp3 = *((uint32*)comp_ref);
    comp_ref += 4;

    temp4 = *((uint32*)comp_ref);
    comp_ref += 4;

    i = 16;
    while (i > 0)
    {
        *((uint32*)pred) = temp1;
        *((uint32*)(pred + 4)) = temp2;
        *((uint32*)(pred + 8)) = temp3;
        *((uint32*)(pred + 12)) = temp4;
        pred += pred_pitch;
        i--;
    }

    return ;
}

/*
 * 16x16 horizontal mode: replicate each of the 16 left-neighbor pixels
 * across its 16-pixel row; pitch is the stride between neighbor samples.
 */
void Intra_16x16_Horizontal(AVCCommonObj *video, int pitch)
{
    int i;
    uint32 temp;
    uint8 *comp_ref = video->pintra_pred_left;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    for (i = 0; i < 16; i++)
    {
        temp = *comp_ref;
        temp |= (temp << 8);
        temp |= (temp << 16);
        *((uint32*)pred) = temp;
        *((uint32*)(pred + 4)) = temp;
        *((uint32*)(pred + 8)) = temp;
        *((uint32*)(pred + 12)) = temp;
        pred += pred_pitch;
        comp_ref += pitch;
    }
}


/*
 * 16x16 DC mode: fill the MB with the rounded mean of the available top
 * and/or left neighbors (128 when neither is available). The top-row sum is
 * computed two bytes per add using the 0xFF00FF masking trick on each
 * 32-bit load.
 */
void  Intra_16x16_DC(AVCCommonObj *video, int pitch)
{
    int i;
    uint32 temp, temp2;
    uint8 *comp_ref_x = video->pintra_pred_top;
    uint8 *comp_ref_y = video->pintra_pred_left;
    int sum = 0;
    uint8 *pred = video->pred_block;
    int pred_pitch = video->pred_pitch;

    if (video->intraAvailB)
    {
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum = temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        temp = *((uint32*)comp_ref_x);
        comp_ref_x += 4;
        temp2 = (temp >> 8) & 0xFF00FF;
        temp &= 0xFF00FF;
        temp += temp2;
        sum += temp + (temp >> 16);
        sum &= 0xFFFF;

        if (video->intraAvailA)
        {
            for (i = 0; i < 16; i++)
            {
                sum += (*comp_ref_y);
                comp_ref_y += pitch;
            }
            sum = (sum + 16) >> 5;
        }
        else
        {
            sum = (sum + 8) >> 4;
        }
    }
    else if (video->intraAvailA)
    {
        for (i = 0; i < 16; i++)
        {
            sum += *comp_ref_y;
            comp_ref_y += pitch;
        }
        sum = (sum + 8) >> 4;
    }
    else
    {
        sum = 128;
    }

    temp = sum | (sum << 8);
    temp |= (temp << 16);

    for (i = 0; i < 16; i++)
    {
        *((uint32*)pred) = temp;
        *((uint32*)(pred + 4)) = temp;
        *((uint32*)(pred + 8)) = temp;
        *((uint32*)(pred + 12)) = temp;
        pred += pred_pitch;
    }

}

/*
 * 16x16 plane mode: fit a plane through the border pixels (H = horizontal
 * gradient, V = vertical gradient, a_16 = scaled corner term), then evaluate
 * it per pixel with incremental adds of b, clipping every value to 0..255
 * via CLIP_RESULT.
 */
void Intra_16x16_Plane(AVCCommonObj *video, int pitch)
{
    int i, a_16, b, c, factor_c;
    uint8 *comp_ref_x = video->pintra_pred_top;
    uint8 *comp_ref_y = video->pintra_pred_left;
    uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;
    int H = 0, V = 0 , tmp;
    uint8 *pred = video->pred_block;
    uint32 temp;
    uint8 byte1, byte2, byte3;
    int value;
    int pred_pitch = video->pred_pitch;

    comp_ref_x0 = comp_ref_x + 8;
    comp_ref_x1 = comp_ref_x + 6;
    comp_ref_y0 = comp_ref_y + (pitch << 3);
    comp_ref_y1 = comp_ref_y + 6 * pitch;

    /* weighted gradient sums over mirrored border-pixel pairs */
    for (i = 1; i < 8; i++)
    {
        H += i * (*comp_ref_x0++ - *comp_ref_x1--);
        V += i * (*comp_ref_y0 - *comp_ref_y1);
        comp_ref_y0 += pitch;
        comp_ref_y1 -= pitch;
    }

    H += i * (*comp_ref_x0++ - video->intra_pred_topleft);
    V += i * (*comp_ref_y0 - *comp_ref_y1);


    a_16 = ((*(comp_ref_x + 15) + *(comp_ref_y + 15 * pitch)) << 4) + 16;; /* NOTE(review): stray second ';' is a harmless empty statement */
    b = (5 * H + 32) >> 6;
    c = (5 * V + 32) >> 6;

    tmp = 0;

    for (i = 0; i < 16; i++)
    {
        factor_c = a_16 + c * (tmp++ - 7);

        factor_c -= 7 * b;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)pred) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 4)) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 8)) = temp;

        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte1 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte2 = value;
        value = factor_c >> 5;
        factor_c += b;
        CLIP_RESULT(value)
        byte3 = value;
        value = factor_c >> 5;
        CLIP_RESULT(value)
        temp = byte1 | (byte2 << 8);
        temp |= (byte3 << 16);
        temp |= (value << 24);
        *((uint32*)(pred + 12)) = temp;
        pred += pred_pitch;
    }
}

/************** Chroma intra prediction *********************/

/*
 * Chroma DC mode: compute one DC value per 4x4 chroma quadrant
 * (pred_0..pred_3) for Cb then Cr, from whichever of the top/left neighbor
 * rows are available, and fill both 8x8 chroma prediction blocks.
 * NOTE(review): 'intraAvailB & intraAvailA' is a bitwise AND -- correct only
 * while both flags are strictly 0/1.
 */
void Intra_Chroma_DC(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
{
    int i;
    uint32 temp, temp2, pred_a, pred_b;
    uint8 *comp_ref_x, *comp_ref_y;
    uint8 *comp_ref_cb_x = video->pintra_pred_top_cb;
    uint8 *comp_ref_cb_y = video->pintra_pred_left_cb;
    uint8 *comp_ref_cr_x = video->pintra_pred_top_cr;
    uint8 *comp_ref_cr_y = video->pintra_pred_left_cr;
    int  component, j;
    int  sum_x0, sum_x1, sum_y0, sum_y1;
    int pred_0[2], pred_1[2], pred_2[2], pred_3[2];
    int pred_pitch = video->pred_pitch;
    uint8 *pred;

    if (video->intraAvailB & video->intraAvailA)
    {
        comp_ref_x = comp_ref_cb_x;
        comp_ref_y = comp_ref_cb_y;
        for (i = 0; i < 2; i++)
        {
            /* sum 4 top bytes per word using the 0xFF00FF masking trick */
            temp = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x0 = temp & 0xFFFF;

            temp = *((uint32*)comp_ref_x);
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x1 = temp & 0xFFFF;

            pred_1[i] = (sum_x1 + 2) >> 2;

            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);

            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);

            pred_2[i] = (sum_y1 + 2) >> 2;

            pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;
            pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;

            /* second pass handles Cr */
            comp_ref_x = comp_ref_cr_x;
            comp_ref_y = comp_ref_cr_y;
        }
    }

    else if (video->intraAvailA)
    {
        comp_ref_y = comp_ref_cb_y;
        for (i = 0; i < 2; i++)
        {
            sum_y0 = *comp_ref_y;
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);
            sum_y0 += *(comp_ref_y += pitch);

            sum_y1 = *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);
            sum_y1 += *(comp_ref_y += pitch);

            pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;
            pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;
            comp_ref_y = comp_ref_cr_y;
        }
    }
    else if (video->intraAvailB)
    {
        comp_ref_x = comp_ref_cb_x;
        for (i = 0; i < 2; i++)
        {
            temp = *((uint32*)comp_ref_x);
            comp_ref_x += 4;
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x0 = temp & 0xFFFF;

            temp = *((uint32*)comp_ref_x);
            temp2 = (temp >> 8) & 0xFF00FF;
            temp &= 0xFF00FF;
            temp += temp2;
            temp += (temp >> 16);
            sum_x1 = temp & 0xFFFF;

            pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;
            pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;
            comp_ref_x = comp_ref_cr_x;
        }
    }
    else
    {
        pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =
                                                pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 128;
    }

    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        pred_a = pred_0[component];
        pred_b = pred_1[component];
        pred_a |= (pred_a << 8);
        pred_a |= (pred_a << 16);
        pred_b |= (pred_b << 8);
        pred_b |= (pred_b << 16);

        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++) /* 4 lines */
            {
                *((uint32*)pred) = pred_a;
                *((uint32*)(pred + 4)) = pred_b;
                pred += pred_pitch; /* move to the next line */
            }
            pred_a = pred_2[component];
            pred_b = pred_3[component];
            pred_a |= (pred_a << 8);
            pred_a |= (pred_a << 16);
            pred_b |= (pred_b << 8);
            pred_b |= (pred_b << 16);
        }
        pred = predCr; /* point to cr */
    }
}

/*
 * Chroma horizontal mode: replicate each left-neighbor pixel across its
 * 8-pixel row, Cb then Cr (i loop runs twice x 4 rows = 8 rows each).
 */
void  Intra_Chroma_Horizontal(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)
{
    int i;
    uint32 temp;
    uint8   *comp_ref_cb_y = video->pintra_pred_left_cb;
    uint8   *comp_ref_cr_y = video->pintra_pred_left_cr;
    uint8  *comp;
    int component, j;
    int     pred_pitch = video->pred_pitch;
    uint8   *pred;

    comp = comp_ref_cb_y;
    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        for (i = 4; i < 6; i++)
        {
            for (j = 0; j < 4; j++)
            {
                temp = *comp;
                comp += pitch;
                temp |= (temp << 8);
                temp |= (temp << 16);
                *((uint32*)pred) = temp;
                *((uint32*)(pred + 4)) = temp;
                pred += pred_pitch;
            }
        }
        comp = comp_ref_cr_y;
        pred = predCr; /* point to cr */
    }

}

/*
 * Chroma vertical mode: copy the 8 pixels above the chroma block into all
 * 8 rows, Cb then Cr (two cached words, two stores per row).
 */
void  Intra_Chroma_Vertical(AVCCommonObj *video, uint8 *predCb, uint8 *predCr)
{
    uint32  temp1, temp2;
    uint8   *comp_ref_cb_x = video->pintra_pred_top_cb;
    uint8   *comp_ref_cr_x = video->pintra_pred_top_cr;
    uint8   *comp_ref;
    int     component, j;
    int     pred_pitch = video->pred_pitch;
    uint8   *pred;

    comp_ref = comp_ref_cb_x;
    pred = predCb;
    for (component = 0; component < 2; component++)
    {
        temp1 = *((uint32*)comp_ref);
        temp2 = *((uint32*)(comp_ref + 4));
        for (j = 0; j < 8; j++)
        {
            *((uint32*)pred) = temp1;
            *((uint32*)(pred + 4)) = temp2;
            pred += pred_pitch;
        }
        comp_ref = comp_ref_cr_x;
        pred = predCr; /* point to cr 
*/\n    }\n\n}\n\nvoid  Intra_Chroma_Plane(AVCCommonObj *video, int pitch, uint8 *predCb, uint8 *predCr)\n{\n    int i;\n    int a_16_C[2], b_C[2], c_C[2], a_16, b, c, factor_c;\n    uint8 *comp_ref_x, *comp_ref_y, *comp_ref_x0, *comp_ref_x1,  *comp_ref_y0, *comp_ref_y1;\n    int component, j;\n    int H, V, tmp;\n    uint32 temp;\n    uint8 byte1, byte2, byte3;\n    int value;\n    uint8 topleft;\n    int pred_pitch = video->pred_pitch;\n    uint8 *pred;\n\n    comp_ref_x = video->pintra_pred_top_cb;\n    comp_ref_y = video->pintra_pred_left_cb;\n    topleft = video->intra_pred_topleft_cb;\n\n    for (component = 0; component < 2; component++)\n    {\n        H = V = 0;\n        comp_ref_x0 = comp_ref_x + 4;\n        comp_ref_x1 = comp_ref_x + 2;\n        comp_ref_y0 = comp_ref_y + (pitch << 2);\n        comp_ref_y1 = comp_ref_y + (pitch << 1);\n        for (i = 1; i < 4; i++)\n        {\n            H += i * (*comp_ref_x0++ - *comp_ref_x1--);\n            V += i * (*comp_ref_y0 - *comp_ref_y1);\n            comp_ref_y0 += pitch;\n            comp_ref_y1 -= pitch;\n        }\n        H += i * (*comp_ref_x0++ - topleft);\n        V += i * (*comp_ref_y0 - *comp_ref_y1);\n\n        a_16_C[component] = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;\n        b_C[component] = (17 * H + 16) >> 5;\n        c_C[component] = (17 * V + 16) >> 5;\n\n        comp_ref_x = video->pintra_pred_top_cr;\n        comp_ref_y = video->pintra_pred_left_cr;\n        topleft = video->intra_pred_topleft_cr;\n    }\n\n    pred = predCb;\n    for (component = 0; component < 2; component++)\n    {\n        a_16 = a_16_C[component];\n        b = b_C[component];\n        c = c_C[component];\n        tmp = 0;\n        for (i = 4; i < 6; i++)\n        {\n            for (j = 0; j < 4; j++)\n            {\n                factor_c = a_16 + c * (tmp++ - 3);\n\n                factor_c -= 3 * b;\n\n                value = factor_c >> 5;\n                factor_c += b;\n                
CLIP_RESULT(value)\n                byte1 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                byte2 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                byte3 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                temp = byte1 | (byte2 << 8);\n                temp |= (byte3 << 16);\n                temp |= (value << 24);\n                *((uint32*)pred) = temp;\n\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                byte1 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                byte2 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                byte3 = value;\n                value = factor_c >> 5;\n                factor_c += b;\n                CLIP_RESULT(value)\n                temp = byte1 | (byte2 << 8);\n                temp |= (byte3 << 16);\n                temp |= (value << 24);\n                *((uint32*)(pred + 4)) = temp;\n                pred += pred_pitch;\n            }\n        }\n        pred = predCr; /* point to cr */\n    }\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/pvavcdecoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"pvavcdecoder.h\"\n#include \"oscl_mem.h\"\n\n// xxx pa\n#define LOG_TAG \"pvavcdecoder\"\n#include \"android/log.h\"\n\n\n/* global static functions */\n\nvoid CbAvcDecDebugLog(uint32 *userData, AVCLogType type, char *string1, int val1, int val2)\n{\n    OSCL_UNUSED_ARG(userData);\n    OSCL_UNUSED_ARG(type);\n    OSCL_UNUSED_ARG(string1);\n    OSCL_UNUSED_ARG(val1);\n    OSCL_UNUSED_ARG(val2);\n\n    return ;\n}\n\nint CbAvcDecMalloc(void *userData, int32 size, int attribute)\n{\n    OSCL_UNUSED_ARG(userData);\n    OSCL_UNUSED_ARG(attribute);\n\n    uint8 *mem;\n\n    mem = (uint8*) oscl_malloc(size);\n\n    return (int)mem;\n}\n\nvoid CbAvcDecFree(void *userData, int mem)\n{\n    OSCL_UNUSED_ARG(userData);\n\n    oscl_free((void*)mem);\n\n    return ;\n}\n\nint CbAvcDecDPBAlloc(void *userData, uint frame_size_in_mbs, uint num_buffers)\n{\n    PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData;\n\n    return pAvcDec->AVC_DPBAlloc(frame_size_in_mbs, num_buffers);\n}\n\nvoid CbAvcDecFrameUnbind(void *userData, int indx)\n{\n    PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData;\n\n    pAvcDec->AVC_FrameUnbind(indx);\n\n    return ;\n}\n\nint CbAvcDecFrameBind(void *userData, int indx, uint8 
**yuv)\n{\n    PVAVCDecoder *pAvcDec = (PVAVCDecoder*) userData;\n\n    return pAvcDec->AVC_FrameBind(indx, yuv);\n}\n\n\n\n/* ///////////////////////////////////////////////////////////////////////// */\nPVAVCDecoder::PVAVCDecoder()\n{\n\n//iDecoderControl\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nPVAVCDecoder::~PVAVCDecoder()\n{\n    CleanUpAVCDecoder();\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nPVAVCDecoder* PVAVCDecoder::New(void)\n{\n    PVAVCDecoder* self = new PVAVCDecoder;\n    if (self && self->Construct())\n        return self;\n    if (self)\n        delete self;\n    return NULL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nbool PVAVCDecoder::Construct()\n{\n    oscl_memset((void*)&iAvcHandle, 0, sizeof(AVCHandle));\n\n    // xxx pa callback setter\n    iAvcHandle.CBAVC_DPBAlloc = &CbAvcDecDPBAlloc;\n    iAvcHandle.CBAVC_FrameBind = &CbAvcDecFrameBind;\n    iAvcHandle.CBAVC_FrameUnbind = &CbAvcDecFrameUnbind;\n    iAvcHandle.CBAVC_Free = &CbAvcDecFree;\n    iAvcHandle.CBAVC_Malloc = &CbAvcDecMalloc;\n    iAvcHandle.CBAVC_DebugLog = &CbAvcDecDebugLog;\n    iAvcHandle.userData = this;\n\n    iFramePtr = NULL;\n    iDPB = NULL;\n    iFrameUsed = NULL;\n    iNumFrames = NULL;\n\n    return true;\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVAVCDecoder::CleanUpAVCDecoder(void)\n{\n    PVAVCCleanUpDecoder((AVCHandle *)&iAvcHandle);\n}\n\n\nvoid PVAVCDecoder::ResetAVCDecoder(void)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"ResetAVCDecoder START\");\n\n    PVAVCDecReset((AVCHandle *)&iAvcHandle);\n    \n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"ResetAVCDecoder END\");\n\n}\n\n/////////////////////////////////////////////////////////////////////////////\n\nint32 PVAVCDecoder::DecodeSPS(uint8 *bitstream, int32 buffer_size)\n{\n    
__android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DecodeSPS\");\n\n    return PVAVCDecSeqParamSet((AVCHandle *)&iAvcHandle, bitstream, buffer_size);\n}\n\nint32 PVAVCDecoder::DecodePPS(uint8 *bitstream, int32 buffer_size)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DecodePPS\");\n\n    return PVAVCDecPicParamSet((AVCHandle *)&iAvcHandle, bitstream, buffer_size);\n}\n\nint32 PVAVCDecoder::DecodeAVCSlice(uint8 *bitstream, int32 *buffer_size)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"DecodeAVCSlice\");\n\n    return (PVAVCDecodeSlice((AVCHandle *)&iAvcHandle, bitstream, *buffer_size));\n}\n\nbool PVAVCDecoder::GetDecOutput(int *indx, int *release, AVCFrameIO* output)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetDecOutput\");\n\n    return (PVAVCDecGetOutput((AVCHandle *)&iAvcHandle, indx, release, output) != AVCDEC_SUCCESS) ? false : true;\n}\n\n\nvoid PVAVCDecoder::GetVideoDimensions(int32 *width, int32 *height, int32 *top, int32 *left, int32 *bottom, int32 *right)\n{\n    AVCDecSPSInfo seqInfo;\n    PVAVCDecGetSeqInfo((AVCHandle *)&iAvcHandle, &seqInfo);\n    *width = seqInfo.FrameWidth;\n    *height = seqInfo.FrameHeight;\n\n    /* assuming top left corner aligned */\n    *top = seqInfo.frame_crop_top;\n    *left = seqInfo.frame_crop_left;\n    *bottom = seqInfo.frame_crop_bottom;\n    *right = seqInfo.frame_crop_right;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\n\nint PVAVCDecoder::AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers)\n{\n    int ii;\n    uint frame_size = (frame_size_in_mbs << 8) + (frame_size_in_mbs << 7);\n\n    if (iDPB) oscl_free(iDPB); // free previous one first\n\n    iDPB = (uint8*) oscl_malloc(sizeof(uint8) * frame_size * num_buffers);\n    if (iDPB == NULL)\n    {\n        return 0;\n    }\n\n    iNumFrames = num_buffers;\n\n    if (iFrameUsed) oscl_free(iFrameUsed); // free previous one\n\n    iFrameUsed = (bool*) oscl_malloc(sizeof(bool) * 
num_buffers);\n    if (iFrameUsed == NULL)\n    {\n        return 0;\n    }\n\n    if (iFramePtr) oscl_free(iFramePtr); // free previous one\n    iFramePtr = (uint8**) oscl_malloc(sizeof(uint8*) * num_buffers);\n    if (iFramePtr == NULL)\n    {\n        return 0;\n    }\n\n    iFramePtr[0] = iDPB;\n    iFrameUsed[0] = false;\n\n    for (ii = 1; ii < (int)num_buffers; ii++)\n    {\n        iFrameUsed[ii] = false;\n        iFramePtr[ii] = iFramePtr[ii-1] + frame_size;\n    }\n\n    return 1;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nvoid PVAVCDecoder::AVC_FrameUnbind(int indx)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameUnbind(%d)\", indx);\n\n    if (indx < iNumFrames)\n    {\n        iFrameUsed[indx] = false;\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameUnbind iFrameUsed[indx(%d)] = false;\", indx);\n    }\n\n    return ;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nint PVAVCDecoder::AVC_FrameBind(int indx, uint8** yuv)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameBind(%d)\", indx);\n\n    if ((iFrameUsed[indx] == true) || (indx >= iNumFrames))\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameBind return 0: (iFrameUsed[indx] == true) --> %d // (indx (%d) >= iNumFrames (%d)) --> %d\", (iFrameUsed[indx] == true), indx, iNumFrames, (indx >= iNumFrames));\n\n        return 0; // already in used\n    }\n\n    iFrameUsed[indx] = true;\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameBind iFrameUsed[indx(%d)] = true;\", indx);\n\n    *yuv = iFramePtr[indx];\n    \n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"AVC_FrameBind final return 1\");\n\n    return 1;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/pvavcdecoder_factory.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\n * @file pvavcdecoder_factory.cpp\n * @brief Singleton factory for PVAVCDecoder\n */\n\n#include \"oscl_base.h\"\n\n#include \"pvavcdecoder.h\"\n#include \"pvavcdecoder_factory.h\"\n\n#include \"oscl_error_codes.h\"\n#include \"oscl_exception.h\"\n\n// Use default DLL entry point\n#include \"oscl_dll.h\"\n\nOSCL_DLL_ENTRY_POINT_DEFAULT()\n\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF PVAVCDecoderInterface* PVAVCDecoderFactory::CreatePVAVCDecoder()\n{\n    PVAVCDecoderInterface* videodec = NULL;\n    videodec = PVAVCDecoder::New();\n    if (videodec == NULL)\n    {\n        OSCL_LEAVE(OsclErrNoMemory);\n    }\n    return videodec;\n}\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF bool PVAVCDecoderFactory::DeletePVAVCDecoder(PVAVCDecoderInterface* aVideoDec)\n{\n    if (aVideoDec)\n    {\n        OSCL_DELETE(aVideoDec);\n        return true;\n    }\n\n    return false;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/residual.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_lib.h\"\n#include \"avcdec_bitstream.h\"\n#include \"oscl_mem.h\"\n\nAVCDec_Status DecodeIntraPCM(AVCCommonObj *video, AVCDecBitstream *stream)\n{\n    AVCDec_Status status;\n    int j;\n    int mb_x, mb_y, offset1;\n    uint8 *pDst;\n    uint32 byte0, byte1;\n    int pitch;\n\n    mb_x = video->mb_x;\n    mb_y = video->mb_y;\n\n#ifdef USE_PRED_BLOCK\n    pDst = video->pred_block + 84;\n    pitch = 20;\n#else\n    offset1 = (mb_x << 4) + (mb_y << 4) * video->PicWidthInSamplesL;\n    pDst = video->currPic->Sl + offset1;\n    pitch = video->currPic->pitch;\n#endif\n\n    /* at this point bitstream is byte-aligned */\n    j = 16;\n    while (j > 0)\n    {\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)pDst) = byte0;\n\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        
byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)(pDst + 4)) = byte0;\n\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)(pDst + 8)) = byte0;\n\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)(pDst + 12)) = byte0;\n        j--;\n        pDst += pitch;\n\n        if (status != AVCDEC_SUCCESS)  /* check only once per line */\n            return status;\n    }\n\n#ifdef USE_PRED_BLOCK\n    pDst = video->pred_block + 452;\n    pitch = 12;\n#else\n    offset1 = (offset1 >> 2) + (mb_x << 2);\n    pDst = video->currPic->Scb + offset1;\n    pitch >>= 1;\n#endif\n\n    j = 8;\n    while (j > 0)\n    {\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)pDst) = byte0;\n\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 
<< 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)(pDst + 4)) = byte0;\n\n        j--;\n        pDst += pitch;\n\n        if (status != AVCDEC_SUCCESS)  /* check only once per line */\n            return status;\n    }\n\n#ifdef USE_PRED_BLOCK\n    pDst = video->pred_block + 596;\n    pitch = 12;\n#else\n    pDst = video->currPic->Scr + offset1;\n#endif\n    j = 8;\n    while (j > 0)\n    {\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)pDst) = byte0;\n\n        status = BitstreamReadBits(stream, 8, (uint*) & byte0);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 8);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 16);\n        status = BitstreamReadBits(stream, 8, (uint*) & byte1);\n        byte0 |= (byte1 << 24);\n        *((uint32*)(pDst + 4)) = byte0;\n\n        j--;\n        pDst += pitch;\n\n        if (status != AVCDEC_SUCCESS)  /* check only once per line */\n            return status;\n    }\n\n#ifdef MB_BASED_DEBLOCK\n    SaveNeighborForIntraPred(video, offset1);\n#endif\n\n    return AVCDEC_SUCCESS;\n}\n\n\n\n/* see subclause 7.3.5.3 and readCBPandCoeffsFromNAL() in JM*/\nAVCDec_Status residual(AVCDecObject *decvid, AVCMacroblock *currMB)\n{\n    AVCCommonObj *video = decvid->common;\n    int16 *block;\n    int level[16], run[16], numcoeff; /* output from residual_block_cavlc */\n    int block_x, i, j, k, idx, iCbCr;\n    int mbPartIdx, subMbPartIdx, mbPartIdx_X, 
mbPartIdx_Y;\n    int nC, maxNumCoeff = 16;\n    int coeffNum, start_scan = 0;\n    uint8 *zz_scan;\n    int Rq, Qq;\n    uint32 cbp4x4 = 0;\n\n    /* in 8.5.4, it only says if it's field macroblock. */\n\n    zz_scan = (uint8*) ZZ_SCAN_BLOCK;\n\n\n    /* see 8.5.8 for the initialization of these values */\n    Qq = video->QPy_div_6;\n    Rq = video->QPy_mod_6;\n\n    oscl_memset(video->block, 0, sizeof(int16)*NUM_PIXELS_IN_MB);\n\n    if (currMB->mbMode == AVC_I16)\n    {\n        nC = predict_nnz(video, 0, 0);\n        decvid->residual_block(decvid, nC, 16, level, run, &numcoeff);\n        /* then performs zigzag and transform */\n        block = video->block;\n        coeffNum = -1;\n        for (i = numcoeff - 1; i >= 0; i--)\n        {\n            coeffNum += run[i] + 1;\n            if (coeffNum > 15)\n            {\n                return AVCDEC_FAIL;\n            }\n            idx = zz_scan[coeffNum] << 2;\n            /*          idx = ((idx>>2)<<6) + ((idx&3)<<2); */\n            block[idx] = level[i];\n        }\n\n        /* inverse transform on Intra16x16DCLevel */\n        if (numcoeff)\n        {\n            Intra16DCTrans(block, Qq, Rq);\n            cbp4x4 = 0xFFFF;\n        }\n        maxNumCoeff = 15;\n        start_scan = 1;\n    }\n\n    oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*24);\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        mbPartIdx_X = (mbPartIdx & 1) << 1;\n        mbPartIdx_Y = mbPartIdx & -2;\n\n        if (currMB->CBP&(1 << mbPartIdx))\n        {\n            for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)\n            {\n                i = mbPartIdx_X + (subMbPartIdx & 1);  // check this\n                j = mbPartIdx_Y + (subMbPartIdx >> 1);\n                block = video->block + (j << 6) + (i << 2);  //\n                nC = predict_nnz(video, i, j);\n                decvid->residual_block(decvid, nC, maxNumCoeff, level, run, &numcoeff);\n\n                /* convert to raster scan and 
quantize*/\n                /* Note: for P mb in SP slice and SI mb in SI slice,\n                 the quantization cannot be done here.\n                 block[idx] should be assigned with level[k].\n                itrans will be done after the prediction.\n                There will be transformation on the predicted value,\n                then addition with block[idx], then this quantization\n                and transform.*/\n\n                coeffNum = -1 + start_scan;\n                for (k = numcoeff - 1; k >= 0; k--)\n                {\n                    coeffNum += run[k] + 1;\n                    if (coeffNum > 15)\n                    {\n                        return AVCDEC_FAIL;\n                    }\n                    idx = zz_scan[coeffNum];\n                    block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq ;\n                }\n\n                currMB->nz_coeff[(j<<2)+i] = numcoeff;\n                if (numcoeff)\n                {\n                    cbp4x4 |= (1 << ((j << 2) + i));\n                }\n            }\n        }\n    }\n\n    Qq = video->QPc_div_6;\n    Rq = video->QPc_mod_6;\n\n    if (currMB->CBP & (3 << 4)) /* chroma DC residual present */\n    {\n        for (iCbCr = 0; iCbCr < 2; iCbCr++)\n        {\n            decvid->residual_block(decvid, -1, 4, level, run, &numcoeff);\n            block = video->block + 256 + (iCbCr << 3);\n            coeffNum = -1;\n            for (i = numcoeff - 1; i >= 0; i--)\n            {\n                coeffNum += run[i] + 1;\n                if (coeffNum > 3)\n                {\n                    return AVCDEC_FAIL;\n                }\n                block[(coeffNum>>1)*64 + (coeffNum&1)*4] = level[i];\n            }\n            /* inverse transform on chroma DC */\n            /* for P in SP and SI in SI, this function can't be done here,\n            must do prediction transform/quant first. 
*/\n            if (numcoeff)\n            {\n                ChromaDCTrans(block, Qq, Rq);\n                cbp4x4 |= (iCbCr ? 0xcc0000 : 0x330000);\n            }\n        }\n    }\n\n    if (currMB->CBP & (2 << 4))\n    {\n        for (block_x = 0; block_x < 4; block_x += 2) /* for iCbCr */\n        {\n            for (j = 4; j < 6; j++)  /* for each block inside Cb or Cr */\n            {\n                for (i = block_x; i < block_x + 2; i++)\n                {\n\n                    block = video->block + (j << 6) + (i << 2);\n\n                    nC = predict_nnz_chroma(video, i, j);\n                    decvid->residual_block(decvid, nC, 15, level, run, &numcoeff);\n\n                    /* convert to raster scan and quantize */\n                    /* for P MB in SP slice and SI MB in SI slice,\n                       the dequant and transform cannot be done here.\n                       It needs the prediction values. */\n                    coeffNum = 0;\n                    for (k = numcoeff - 1; k >= 0; k--)\n                    {\n                        coeffNum += run[k] + 1;\n                        if (coeffNum > 15)\n                        {\n                            return AVCDEC_FAIL;\n                        }\n                        idx = zz_scan[coeffNum];\n                        block[idx] = (level[k] * dequant_coefres[Rq][coeffNum]) << Qq;\n                    }\n\n\n                    /* then transform */\n                    //              itrans(block); /* transform */\n                    currMB->nz_coeff[(j<<2)+i] = numcoeff;    //\n                    if (numcoeff)\n                    {\n                        cbp4x4 |= (1 << ((j << 2) + i));\n                    }\n                }\n\n            }\n        }\n    }\n\n    video->cbp4x4 = cbp4x4;\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see subclause 7.3.5.3.1 and 9.2 and readCoeff4x4_CAVLC() in JM */\nAVCDec_Status residual_block_cavlc(AVCDecObject *decvid, int nC, int 
maxNumCoeff,\n                                   int *level, int *run, int *numcoeff)\n{\n    int i, j;\n    int TrailingOnes, TotalCoeff;\n    AVCDecBitstream *stream = decvid->bitstream;\n    int suffixLength;\n    uint trailing_ones_sign_flag, level_prefix, level_suffix;\n    int levelCode, levelSuffixSize, zerosLeft;\n    int run_before;\n\n\n    if (nC >= 0)\n    {\n        ce_TotalCoeffTrailingOnes(stream, &TrailingOnes, &TotalCoeff, nC);\n    }\n    else\n    {\n        ce_TotalCoeffTrailingOnesChromaDC(stream, &TrailingOnes, &TotalCoeff);\n    }\n\n    *numcoeff = TotalCoeff;\n\n    /* This part is done quite differently in ReadCoef4x4_CAVLC() */\n    if (TotalCoeff == 0)\n    {\n        return AVCDEC_SUCCESS;\n    }\n\n    if (TrailingOnes) /* keep reading the sign of those trailing ones */\n    {\n        /* instead of reading one bit at a time, read the whole thing at once */\n        BitstreamReadBits(stream, TrailingOnes, &trailing_ones_sign_flag);\n        trailing_ones_sign_flag <<= 1;\n        for (i = 0; i < TrailingOnes; i++)\n        {\n            level[i] = 1 - ((trailing_ones_sign_flag >> (TrailingOnes - i - 1)) & 2);\n        }\n    }\n\n    i = TrailingOnes;\n    suffixLength = 1;\n    if (TotalCoeff > TrailingOnes)\n    {\n        ce_LevelPrefix(stream, &level_prefix);\n        if (TotalCoeff < 11 || TrailingOnes == 3)\n        {\n            if (level_prefix < 14)\n            {\n//              levelSuffixSize = 0;\n                levelCode = level_prefix;\n            }\n            else if (level_prefix == 14)\n            {\n//              levelSuffixSize = 4;\n                BitstreamReadBits(stream, 4, &level_suffix);\n                levelCode = 14 + level_suffix;\n            }\n            else /* if (level_prefix == 15) */\n            {\n//              levelSuffixSize = 12;\n                BitstreamReadBits(stream, 12, &level_suffix);\n                levelCode = 30 + level_suffix;\n            }\n        }\n        else\n  
      {\n            /*              suffixLength = 1; */\n            if (level_prefix < 15)\n            {\n                levelSuffixSize = suffixLength;\n            }\n            else\n            {\n                levelSuffixSize = 12;\n            }\n            BitstreamReadBits(stream, levelSuffixSize, &level_suffix);\n\n            levelCode = (level_prefix << 1) + level_suffix;\n        }\n\n        if (TrailingOnes < 3)\n        {\n            levelCode += 2;\n        }\n\n        level[i] = (levelCode + 2) >> 1;\n        if (level[i] > 3)\n        {\n            suffixLength = 2;\n        }\n\n        if (levelCode & 1)\n        {\n            level[i] = -level[i];\n        }\n        i++;\n\n    }\n\n    for (j = TotalCoeff - i; j > 0 ; j--)\n    {\n        ce_LevelPrefix(stream, &level_prefix);\n        if (level_prefix < 15)\n        {\n            levelSuffixSize = suffixLength;\n        }\n        else\n        {\n            levelSuffixSize = 12;\n        }\n        BitstreamReadBits(stream, levelSuffixSize, &level_suffix);\n\n        levelCode = (level_prefix << suffixLength) + level_suffix;\n        level[i] = (levelCode >> 1) + 1;\n        if (level[i] > (3 << (suffixLength - 1)) && suffixLength < 6)\n        {\n            suffixLength++;\n        }\n        if (levelCode & 1)\n        {\n            level[i] = -level[i];\n        }\n        i++;\n    }\n\n\n    if (TotalCoeff < maxNumCoeff)\n    {\n        if (nC >= 0)\n        {\n            ce_TotalZeros(stream, &zerosLeft, TotalCoeff);\n        }\n        else\n        {\n            ce_TotalZerosChromaDC(stream, &zerosLeft, TotalCoeff);\n        }\n    }\n    else\n    {\n        zerosLeft = 0;\n    }\n\n    for (i = 0; i < TotalCoeff - 1; i++)\n    {\n        if (zerosLeft > 0)\n        {\n            ce_RunBefore(stream, &run_before, zerosLeft);\n            run[i] = run_before;\n        }\n        else\n        {\n            run[i] = 0;\n            zerosLeft = 0; // could be 
negative under error conditions\n        }\n\n        zerosLeft = zerosLeft - run[i];\n    }\n\n    if (zerosLeft < 0)\n    {\n        zerosLeft = 0;\n//      return AVCDEC_FAIL;\n    }\n\n    run[TotalCoeff-1] = zerosLeft;\n\n    /* leave the inverse zigzag scan part for the caller */\n\n\n    return AVCDEC_SUCCESS;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/slice.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/* Note for optimization: syntax decoding or operations related to B_SLICE should be\ncommented out by macro definition or function pointers. */\n\n#include \"oscl_mem.h\"\n#include \"avcdec_lib.h\"\n#include \"avcdec_bitstream.h\"\n\nconst static int mbPart2raster[3][4] = {{0, 0, 0, 0}, {1, 1, 0, 0}, {1, 0, 1, 0}};\n/* decode_frame_slice() */\n/* decode_one_slice() */\nAVCDec_Status DecodeSlice(AVCDecObject *decvid)\n{\n    AVCDec_Status status;\n    AVCCommonObj *video = decvid->common;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCMacroblock *currMB ;\n    AVCDecBitstream *stream = decvid->bitstream;\n    uint slice_group_id;\n    uint CurrMbAddr, moreDataFlag;\n\n    /* set the first mb in slice */\n    CurrMbAddr = sliceHdr->first_mb_in_slice;\n    slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];\n\n    if ((CurrMbAddr && (CurrMbAddr != (uint)(video->mbNum + 1))) && video->currSeqParams->constrained_set1_flag == 1)\n    {\n        ConcealSlice(decvid, video->mbNum, CurrMbAddr);\n    }\n\n    moreDataFlag = 1;\n    video->mb_skip_run = -1;\n\n\n    /* while loop , see subclause 7.3.4 */\n    do\n    {\n        if (CurrMbAddr >= video->PicSizeInMbs)\n        {\n            
return AVCDEC_FAIL;\n        }\n\n        currMB = video->currMB = &(video->mblock[CurrMbAddr]);\n        video->mbNum = CurrMbAddr;\n        currMB->slice_id = video->slice_id;  //  slice\n\n        /* we can remove this check if we don't support Mbaff. */\n        /* we can wrap below into an initMB() function which will also\n        do necessary reset of macroblock related parameters. */\n\n        video->mb_x = CurrMbAddr % video->PicWidthInMbs;\n        video->mb_y = CurrMbAddr / video->PicWidthInMbs;\n\n        /* check the availability of neighboring macroblocks */\n        InitNeighborAvailability(video, CurrMbAddr);\n\n        /* read_macroblock and decode_one_macroblock() */\n        status = DecodeMB(decvid);\n        if (status != AVCDEC_SUCCESS)\n        {\n            return status;\n        }\n#ifdef MB_BASED_DEBLOCK\n        if (video->currPicParams->num_slice_groups_minus1 == 0)\n        {\n            MBInLoopDeblock(video); /* MB-based deblocking */\n        }\n        else    /* this mode cannot be used if the number of slice group is not one. 
*/\n        {\n            return AVCDEC_FAIL;\n        }\n#endif\n        video->numMBs--;\n\n        moreDataFlag = more_rbsp_data(stream);\n\n\n        /* go to next MB */\n        while (++CurrMbAddr < video->PicSizeInMbs && video->MbToSliceGroupMap[CurrMbAddr] != (int)slice_group_id)\n        {\n        }\n\n    }\n    while ((moreDataFlag && video->numMBs > 0) || video->mb_skip_run > 0); /* even if no more data, but last few MBs are skipped */\n\n    if (video->numMBs == 0)\n    {\n        video->newPic = TRUE;\n        video->mbNum = 0;  // _Conceal\n        return AVCDEC_PICTURE_READY;\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n/* read MB mode and motion vectors */\n/* perform Intra/Inter prediction and residue */\n/* update video->mb_skip_run */\nAVCDec_Status DecodeMB(AVCDecObject *decvid)\n{\n    AVCDec_Status status;\n    AVCCommonObj *video = decvid->common;\n    AVCDecBitstream *stream = decvid->bitstream;\n    AVCMacroblock *currMB = video->currMB;\n    uint mb_type;\n    int slice_type = video->slice_type;\n    int temp;\n\n    currMB->QPy = video->QPy;\n    currMB->QPc = video->QPc;\n\n    if (slice_type == AVC_P_SLICE)\n    {\n        if (video->mb_skip_run < 0)\n        {\n            ue_v(stream, (uint *)&(video->mb_skip_run));\n        }\n\n        if (video->mb_skip_run == 0)\n        {\n            /* this will not handle the case where the slice ends with a mb_skip_run == 0 and no following MB data  */\n            ue_v(stream, &mb_type);\n            if (mb_type > 30)\n            {\n                return AVCDEC_FAIL;\n            }\n            InterpretMBModeP(currMB, mb_type);\n            video->mb_skip_run = -1;\n        }\n        else\n        {\n            /* see subclause 7.4.4 for more details on how\n            mb_field_decoding_flag is derived in case of skipped MB */\n\n            currMB->mb_intra = FALSE;\n\n            currMB->mbMode = AVC_SKIP;\n            currMB->MbPartWidth = currMB->MbPartHeight = 16;\n            
currMB->NumMbPart = 1;\n            currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =\n                                          currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1; //\n            currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =\n                                            currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;\n            currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =\n                                             currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;\n\n            oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);\n\n            currMB->CBP = 0;\n            video->cbp4x4 = 0;\n            /* for skipped MB, always look at the first entry in RefPicList */\n            currMB->RefIdx[0] = currMB->RefIdx[1] =\n                                    currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;\n            InterMBPrediction(video);\n            video->mb_skip_run--;\n            return AVCDEC_SUCCESS;\n        }\n\n    }\n    else\n    {\n        /* Then decode mode and MV */\n        ue_v(stream, &mb_type);\n        if (mb_type > 25)\n        {\n            return AVCDEC_FAIL;\n        }\n        InterpretMBModeI(currMB, mb_type);\n    }\n\n\n    if (currMB->mbMode != AVC_I_PCM)\n    {\n\n        if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)\n        {\n            status = sub_mb_pred(video, currMB, stream);\n        }\n        else\n        {\n            status = mb_pred(video, currMB, stream) ;\n        }\n\n        if (status != AVCDEC_SUCCESS)\n        {\n            return status;\n        }\n\n        if (currMB->mbMode != AVC_I16)\n        {\n            /* decode coded_block_pattern */\n            status = DecodeCBP(currMB, stream);\n            if (status != AVCDEC_SUCCESS)\n            {\n                return status;\n            }\n        }\n\n        if (currMB->CBP > 0 || 
currMB->mbMode == AVC_I16)\n        {\n            se_v(stream, &temp);\n            if (temp)\n            {\n                temp += (video->QPy + 52);\n                currMB->QPy = video->QPy = temp - 52 * (temp * 79 >> 12);\n                if (currMB->QPy > 51 || currMB->QPy < 0)\n                {\n                    video->QPy = AVC_CLIP3(0, 51, video->QPy);\n//                  return AVCDEC_FAIL;\n                }\n                video->QPy_div_6 = (video->QPy * 43) >> 8;\n                video->QPy_mod_6 = video->QPy - 6 * video->QPy_div_6;\n                currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->QPy + video->currPicParams->chroma_qp_index_offset)];\n                video->QPc_div_6 = (video->QPc * 43) >> 8;\n                video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;\n            }\n        }\n        /* decode residue and inverse transform */\n        status = residual(decvid, currMB);\n        if (status != AVCDEC_SUCCESS)\n        {\n            return status;\n        }\n    }\n    else\n    {\n        if (stream->bitcnt & 7)\n        {\n            BitstreamByteAlign(stream);\n        }\n        /* decode pcm_byte[i] */\n        DecodeIntraPCM(video, stream);\n\n        currMB->QPy = 0;  /* necessary for deblocking */ // _OPTIMIZE\n        currMB->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, video->currPicParams->chroma_qp_index_offset)];\n\n        /* default values, don't know if really needed */\n        currMB->CBP = 0x3F;\n        video->cbp4x4 = 0xFFFF;\n        currMB->mb_intra = TRUE;\n        oscl_memset(currMB->nz_coeff, 16, sizeof(uint8)*NUM_BLKS_IN_MB);\n        return AVCDEC_SUCCESS;\n    }\n\n\n    /* do Intra/Inter prediction, together with the residue compensation */\n    /* This part should be common between the skip and no-skip */\n    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)\n    {\n        IntraMBPrediction(video);\n    }\n    else\n    {\n        InterMBPrediction(video);\n    }\n\n\n\n 
   return AVCDEC_SUCCESS;\n}\n\n/* see subclause 7.3.5.1 */\nAVCDec_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)\n{\n    int mbPartIdx;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    uint max_ref_idx;\n    const int *temp_0;\n    int16 *temp_1;\n    uint code;\n\n    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)\n    {\n\n        video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;\n\n        if (!video->currPicParams->constrained_intra_pred_flag)\n        {\n            video->intraAvailA = video->mbAvailA;\n            video->intraAvailB = video->mbAvailB;\n            video->intraAvailC = video->mbAvailC;\n            video->intraAvailD = video->mbAvailD;\n        }\n        else\n        {\n            if (video->mbAvailA)\n            {\n                video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;\n            }\n            if (video->mbAvailB)\n            {\n                video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;\n            }\n            if (video->mbAvailC)\n            {\n                video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;\n            }\n            if (video->mbAvailD)\n            {\n                video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;\n            }\n        }\n\n\n        if (currMB->mbMode == AVC_I4)\n        {\n            /* perform prediction to get the actual intra 4x4 pred mode */\n            DecodeIntra4x4Mode(video, currMB, stream);\n            /* output will be in currMB->i4Mode[4][4] */\n        }\n\n        ue_v(stream, &code);\n\n        if (code > 3)\n        {\n            return AVCDEC_FAIL; /* out of range */\n        }\n        currMB->intra_chroma_pred_mode = (AVCIntraChromaPredMode)code;\n    }\n    else\n    {\n\n        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);\n\n        /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n//      
max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;\n        max_ref_idx = video->refList0Size - 1;\n\n        /* decode ref index for L0 */\n        if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)\n        {\n            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n            {\n                te_v(stream, &code, max_ref_idx);\n                if (code > (uint)max_ref_idx)\n                {\n                    return AVCDEC_FAIL;\n                }\n                currMB->ref_idx_L0[mbPartIdx] = code;\n            }\n        }\n\n        /* populate ref_idx_L0 */\n        temp_0 = &mbPart2raster[currMB->mbMode-AVC_P16][0];\n        temp_1 = &currMB->ref_idx_L0[3];\n\n        *temp_1-- = currMB->ref_idx_L0[*temp_0++];\n        *temp_1-- = currMB->ref_idx_L0[*temp_0++];\n        *temp_1-- = currMB->ref_idx_L0[*temp_0++];\n        *temp_1-- = currMB->ref_idx_L0[*temp_0++];\n\n        /* Global reference index, these values are used in deblock */\n        currMB->RefIdx[0] = video->RefPicList0[currMB->ref_idx_L0[0]]->RefIdx;\n        currMB->RefIdx[1] = video->RefPicList0[currMB->ref_idx_L0[1]]->RefIdx;\n        currMB->RefIdx[2] = video->RefPicList0[currMB->ref_idx_L0[2]]->RefIdx;\n        currMB->RefIdx[3] = video->RefPicList0[currMB->ref_idx_L0[3]]->RefIdx;\n\n        /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n        max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;\n        /* decode mvd_l0 */\n        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n        {\n            se_v(stream, &(video->mvd_l0[mbPartIdx][0][0]));\n            se_v(stream, &(video->mvd_l0[mbPartIdx][0][1]));\n        }\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see subclause 7.3.5.2 */\nAVCDec_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)\n{\n    int mbPartIdx, subMbPartIdx;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    uint max_ref_idx;\n    uint sub_mb_type[4];\n    uint 
code;\n\n    oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        ue_v(stream, &(sub_mb_type[mbPartIdx]));\n        if (sub_mb_type[mbPartIdx] > 3)\n        {\n            return AVCDEC_FAIL;\n        }\n\n    }\n    /* we have to check the values to make sure they are valid  */\n    /* assign values to currMB->sub_mb_type[], currMB->MBPartPredMode[][x] */\n\n    InterpretSubMBModeP(currMB, sub_mb_type);\n\n\n    /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n//      max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;\n    max_ref_idx = video->refList0Size - 1;\n\n    if (sliceHdr->num_ref_idx_l0_active_minus1 > 0 && currMB->mbMode != AVC_P8ref0)\n    {\n        for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n        {\n            te_v(stream, (uint*)&code, max_ref_idx);\n            if (code > max_ref_idx)\n            {\n                return AVCDEC_FAIL;\n            }\n            currMB->ref_idx_L0[mbPartIdx] = code;\n        }\n    }\n    /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n\n    max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;\n    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)\n            max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n        {\n            se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][0]));\n            se_v(stream, &(video->mvd_l0[mbPartIdx][subMbPartIdx][1]));\n        }\n        /* used in deblocking */\n        currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;\n    }\n    return AVCDEC_SUCCESS;\n}\n\nvoid InterpretMBModeI(AVCMacroblock *mblock, uint mb_type)\n{\n    mblock->NumMbPart = 1;\n\n    mblock->mb_intra = TRUE;\n\n    if (mb_type == 0) /* I_4x4 */\n    {\n        mblock->mbMode = 
AVC_I4;\n    }\n    else if (mb_type < 25) /* I_PCM */\n    {\n        mblock->mbMode = AVC_I16;\n        mblock->i16Mode = (AVCIntra16x16PredMode)((mb_type - 1) & 0x3);\n        if (mb_type > 12)\n        {\n            mblock->CBP = (((mb_type - 13) >> 2) << 4) + 0x0F;\n        }\n        else\n        {\n            mblock->CBP = ((mb_type - 1) >> 2) << 4;\n        }\n    }\n    else\n    {\n        mblock->mbMode = AVC_I_PCM;\n    }\n\n    return ;\n}\n\nvoid InterpretMBModeP(AVCMacroblock *mblock, uint mb_type)\n{\n    const static int map2PartWidth[5] = {16, 16, 8, 8, 8};\n    const static int map2PartHeight[5] = {16, 8, 16, 8, 8};\n    const static int map2NumPart[5] = {1, 2, 2, 4, 4};\n    const static AVCMBMode map2mbMode[5] = {AVC_P16, AVC_P16x8, AVC_P8x16, AVC_P8, AVC_P8ref0};\n\n    mblock->mb_intra = FALSE;\n    if (mb_type < 5)\n    {\n        mblock->mbMode = map2mbMode[mb_type];\n        mblock->MbPartWidth = map2PartWidth[mb_type];\n        mblock->MbPartHeight = map2PartHeight[mb_type];\n        mblock->NumMbPart = map2NumPart[mb_type];\n        mblock->NumSubMbPart[0] = mblock->NumSubMbPart[1] =\n                                      mblock->NumSubMbPart[2] = mblock->NumSubMbPart[3] = 1;\n        mblock->SubMbPartWidth[0] = mblock->SubMbPartWidth[1] =\n                                        mblock->SubMbPartWidth[2] = mblock->SubMbPartWidth[3] = mblock->MbPartWidth;\n        mblock->SubMbPartHeight[0] = mblock->SubMbPartHeight[1] =\n                                         mblock->SubMbPartHeight[2] = mblock->SubMbPartHeight[3] = mblock->MbPartHeight;\n    }\n    else\n    {\n        InterpretMBModeI(mblock, mb_type - 5);\n        /* set MV and Ref_Idx codes of Intra blocks in P-slices  */\n        oscl_memset(mblock->mvL0, 0, sizeof(int32)*16);\n        mblock->ref_idx_L0[0] = mblock->ref_idx_L0[1] = mblock->ref_idx_L0[2] = mblock->ref_idx_L0[3] = -1;\n    }\n    return ;\n}\n\nvoid InterpretMBModeB(AVCMacroblock *mblock, uint mb_type)\n{\n    
const static int map2PartWidth[23] = {8, 16, 16, 16, 16, 8, 16, 8, 16, 8,\n                                          16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 8\n                                         };\n    const static int map2PartHeight[23] = {8, 16, 16, 16, 8, 16, 8, 16, 8,\n                                           16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8\n                                          };\n    /* see enum AVCMBType declaration */\n    const static AVCMBMode map2mbMode[23] = {AVC_BDirect16, AVC_P16, AVC_P16, AVC_P16,\n                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,\n                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16,\n                                            AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P16x8, AVC_P8x16, AVC_P8\n                                            };\n    const static int map2PredMode1[23] = {3, 0, 1, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, -1};\n    const static int map2PredMode2[23] = { -1, -1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0, 2, 2, 2, 2, 0, 0, 1, 1, 2, 2, -1};\n    const static int map2NumPart[23] = { -1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4};\n\n    mblock->mb_intra = FALSE;\n\n    if (mb_type < 23)\n    {\n        mblock->mbMode = map2mbMode[mb_type];\n        mblock->NumMbPart = map2NumPart[mb_type];\n        mblock->MBPartPredMode[0][0] = (AVCPredMode)map2PredMode1[mb_type];\n        if (mblock->NumMbPart > 1)\n        {\n            mblock->MBPartPredMode[1][0] = (AVCPredMode)map2PredMode2[mb_type];\n        }\n        mblock->MbPartWidth = map2PartWidth[mb_type];\n        mblock->MbPartHeight = map2PartHeight[mb_type];\n    }\n    else\n    {\n        InterpretMBModeI(mblock, mb_type - 23);\n    }\n\n    return ;\n}\n\nvoid InterpretMBModeSI(AVCMacroblock *mblock, uint mb_type)\n{\n    mblock->mb_intra = TRUE;\n\n    if (mb_type == 0)\n    
{\n        mblock->mbMode = AVC_SI4;\n        /* other values are N/A */\n    }\n    else\n    {\n        InterpretMBModeI(mblock, mb_type - 1);\n    }\n    return ;\n}\n\n/* input is mblock->sub_mb_type[] */\nvoid InterpretSubMBModeP(AVCMacroblock *mblock, uint *sub_mb_type)\n{\n    int i,  sub_type;\n    /* see enum AVCMBType declaration */\n//  const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};\n    const static int map2subPartWidth[4] = {8, 8, 4, 4};\n    const static int map2subPartHeight[4] = {8, 4, 8, 4};\n    const static int map2numSubPart[4] = {1, 2, 2, 4};\n\n    for (i = 0; i < 4 ; i++)\n    {\n        sub_type = (int) sub_mb_type[i];\n        //  mblock->subMbMode[i] = map2subMbMode[sub_type];\n        mblock->NumSubMbPart[i] = map2numSubPart[sub_type];\n        mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];\n        mblock->SubMbPartHeight[i] = map2subPartHeight[sub_type];\n    }\n\n    return ;\n}\n\nvoid InterpretSubMBModeB(AVCMacroblock *mblock, uint *sub_mb_type)\n{\n    int i, j, sub_type;\n    /* see enum AVCMBType declaration */\n    const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8, AVC_8x8, AVC_8x8,\n            AVC_8x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8, AVC_8x4, AVC_4x8, AVC_4x4, AVC_4x4, AVC_4x4\n                                                  };\n    const static int map2subPartWidth[13] = {4, 8, 8, 8, 8, 4, 8, 4, 8, 4, 4, 4, 4};\n    const static int map2subPartHeight[13] = {4, 8, 8, 8, 4, 8, 4, 8, 4, 8, 4, 4, 4};\n    const static int map2numSubPart[13] = {1, 1, 1, 2, 2, 2, 2, 2, 2, 4, 4, 4};\n    const static int map2predMode[13] = {3, 0, 1, 2, 0, 0, 1, 1, 2, 2, 0, 1, 2};\n\n    for (i = 0; i < 4 ; i++)\n    {\n        sub_type = (int) sub_mb_type[i];\n        mblock->subMbMode[i] = map2subMbMode[sub_type];\n        mblock->NumSubMbPart[i] = map2numSubPart[sub_type];\n        mblock->SubMbPartWidth[i] = map2subPartWidth[sub_type];\n        mblock->SubMbPartHeight[i] = 
map2subPartHeight[sub_type];\n        for (j = 0; j < 4; j++)\n        {\n            mblock->MBPartPredMode[i][j] = (AVCPredMode)map2predMode[sub_type];\n        }\n    }\n\n    return ;\n}\n\n/* see subclause 8.3.1 */\nAVCDec_Status DecodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCDecBitstream *stream)\n{\n    int intra4x4PredModeA = 0, intra4x4PredModeB = 0, predIntra4x4PredMode = 0;\n    int component, SubBlock_indx, block_x, block_y;\n    int dcOnlyPredictionFlag;\n    uint    prev_intra4x4_pred_mode_flag[16];\n    int     rem_intra4x4_pred_mode[16];\n    int bindx = 0;\n\n    for (component = 0; component < 4; component++) /* partition index */\n    {\n        block_x = ((component & 1) << 1);\n        block_y = ((component >> 1) << 1);\n\n        for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */\n        {\n            BitstreamRead1Bit(stream, &(prev_intra4x4_pred_mode_flag[bindx]));\n\n            if (!prev_intra4x4_pred_mode_flag[bindx])\n            {\n                BitstreamReadBits(stream, 3, (uint*)&(rem_intra4x4_pred_mode[bindx]));\n            }\n\n            dcOnlyPredictionFlag = 0;\n            if (block_x > 0)\n            {\n                intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];\n            }\n            else\n            {\n                if (video->intraAvailA)\n                {\n                    if (video->mblock[video->mbAddrA].mbMode == AVC_I4)\n                    {\n                        intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];\n                    }\n                    else\n                    {\n                        intra4x4PredModeA = AVC_I4_DC;\n                    }\n                }\n                else\n                {\n                    dcOnlyPredictionFlag = 1;\n                }\n            }\n\n            if (block_y > 0)\n            {\n                intra4x4PredModeB = 
currMB->i4Mode[((block_y-1) << 2) + block_x];\n            }\n            else\n            {\n                if (video->intraAvailB)\n                {\n                    if (video->mblock[video->mbAddrB].mbMode == AVC_I4)\n                    {\n                        intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];\n                    }\n                    else\n                    {\n                        intra4x4PredModeB = AVC_I4_DC;\n                    }\n                }\n                else\n                {\n                    dcOnlyPredictionFlag = 1;\n                }\n            }\n\n            if (dcOnlyPredictionFlag)\n            {\n                intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;\n            }\n\n            predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);\n            if (prev_intra4x4_pred_mode_flag[bindx])\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)predIntra4x4PredMode;\n            }\n            else\n            {\n                if (rem_intra4x4_pred_mode[bindx] < predIntra4x4PredMode)\n                {\n                    currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)rem_intra4x4_pred_mode[bindx];\n                }\n                else\n                {\n                    currMB->i4Mode[(block_y<<2)+block_x] = (AVCIntra4x4PredMode)(rem_intra4x4_pred_mode[bindx] + 1);\n                }\n            }\n            bindx++;\n            block_y += (SubBlock_indx & 1) ;\n            block_x += (1 - 2 * (SubBlock_indx & 1)) ;\n        }\n    }\n    return AVCDEC_SUCCESS;\n}\nAVCDec_Status ConcealSlice(AVCDecObject *decvid, int mbnum_start, int mbnum_end)\n{\n    AVCCommonObj *video = decvid->common;\n    AVCMacroblock *currMB ;\n\n    int CurrMbAddr;\n\n    if (video->RefPicList0[0] == NULL)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    for (CurrMbAddr = mbnum_start; CurrMbAddr < 
mbnum_end; CurrMbAddr++)\n    {\n        currMB = video->currMB = &(video->mblock[CurrMbAddr]);\n        video->mbNum = CurrMbAddr;\n        currMB->slice_id = video->slice_id++;  //  slice\n\n        /* we can remove this check if we don't support Mbaff. */\n        /* we can wrap below into an initMB() function which will also\n        do necessary reset of macroblock related parameters. */\n\n        video->mb_x = CurrMbAddr % video->PicWidthInMbs;\n        video->mb_y = CurrMbAddr / video->PicWidthInMbs;\n\n        /* check the availability of neighboring macroblocks */\n        InitNeighborAvailability(video, CurrMbAddr);\n\n        currMB->mb_intra = FALSE;\n\n        currMB->mbMode = AVC_SKIP;\n        currMB->MbPartWidth = currMB->MbPartHeight = 16;\n\n        currMB->NumMbPart = 1;\n        currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =\n                                      currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;\n        currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =\n                                        currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;\n        currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =\n                                         currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;\n        currMB->QPy = 26;\n        currMB->QPc = 26;\n        oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*NUM_BLKS_IN_MB);\n\n        currMB->CBP = 0;\n        video->cbp4x4 = 0;\n        /* for skipped MB, always look at the first entry in RefPicList */\n        currMB->RefIdx[0] = currMB->RefIdx[1] =\n                                currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;\n        InterMBPrediction(video);\n\n        video->numMBs--;\n\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/vlc.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcdec_lib.h\"\n#include \"avcdec_bitstream.h\"\n\n//#define PV_ARM_V5\n#ifdef PV_ARM_V5\n#define PV_CLZ(A,B) __asm{CLZ (A),(B)}  \\\n    A -= 16;\n#else\n#define PV_CLZ(A,B) while (((B) & 0x8000) == 0) {(B) <<=1; A++;}\n#endif\n\n\n#define PV_NO_CLZ\n\n#ifndef PV_NO_CLZ\ntypedef struct tagVLCNumCoeffTrail\n{\n    int trailing;\n    int total_coeff;\n    int length;\n} VLCNumCoeffTrail;\n\ntypedef struct tagShiftOffset\n{\n    int shift;\n    int offset;\n} ShiftOffset;\n\nconst VLCNumCoeffTrail NumCoeffTrailOnes[3][67] =\n{\n    {{0, 0, 1}, {1, 1, 2}, {2, 2, 3}, {1, 2, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 5, 7},\n        {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {3, 6, 8}, {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 7, 9},\n        {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 8, 10}, {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 9, 11},\n        {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13},\n        {2, 8, 13}, {1, 7, 13}, {0, 6, 13}, {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14},\n        {2, 10, 14}, {1, 9, 14}, {0, 9, 14}, {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15},\n        {2, 12, 15}, {1, 11, 15}, {0, 11, 15}, {3, 
16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16},\n        {2, 14, 16}, {1, 14, 16}, {0, 13, 16}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16}, {1, 13, 15},\n        { -1, -1, -1}, { -1, -1, -1}, { -1, -1, -1}},\n\n    {{1, 1, 2}, {0, 0, 2}, {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {3, 6, 6}, {2, 3, 6},\n        {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 5}, {1, 2, 5}, {1, 2, 5}, {3, 7, 6}, {2, 4, 6},\n        {1, 4, 6}, {0, 2, 6}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 3, 7}, {0, 5, 8}, {2, 6, 8},\n        {1, 6, 8}, {0, 4, 8}, {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {3, 11, 11}, {2, 9, 11},\n        {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11}, {0, 11, 12}, {2, 11, 12},\n        {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12}, {3, 14, 13}, {2, 13, 13},\n        {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13}, {1, 15, 14}, {0, 15, 14},\n        {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13}, {3, 16, 14}, {2, 16, 14},\n        {1, 16, 14}, {0, 16, 14}, {3, 15, 13}},\n\n    {{3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4}, {0, 0, 4},\n        {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5}, {1, 2, 5},\n        {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6}, {0, 1, 6},\n        {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7}, {0, 4, 7},\n        {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8}, {0, 8, 8},\n        {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9}, {0, 10, 9},\n        {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9}, {1, 13, 9},\n        {1, 16, 10}, {0, 15, 10}, {3, 15, 10}, {2, 15, 10}, {3, 16, 10}, {2, 16, 10}, {0, 16, 10}, { -1, -1, -1},\n        { -1, -1, -1}, { -1, -1, -1}, { -1, -1, 
-1}}\n};\n\n\nconst ShiftOffset NumCoeffTrailOnes_indx[3][15] =\n{\n    {{15, -1}, {14, 0}, {13, 1}, {10, -1}, {9, 3}, {8, 7}, {7, 11}, {6, 15},\n        {5, 19}, {3, 19}, {2, 27}, {1, 35}, {0, 43}, {0, 55}, {1, 62}},\n\n    {{14, -2}, {12, -2}, {10, -2}, {10, 10}, {9, 14}, {8, 18}, {7, 22}, {5, 22},\n        {4, 30}, {3, 38}, {2, 46}, {2, 58}, {3, 65}, {16, 0}, {16, 0}},\n\n    {{12, -8}, {11, 0}, {10, 8}, {9, 16}, {8, 24}, {7, 32}, {6, 40}, {6, 52},\n        {6, 58}, {6, 61}, {16, 0}, {16, 0}, {16, 0}, {16, 0}, {16, 0}}\n};\n\nconst static int nC_table[8] = {0, 0, 1, 1, 2, 2, 2, 2};\n\n#endif\n/**\nSee algorithm in subclause 9.1, Table 9-1, Table 9-2. */\nAVCDec_Status ue_v(AVCDecBitstream *bitstream, uint *codeNum)\n{\n    uint temp, tmp_cnt;\n    int leading_zeros = 0;\n    BitstreamShowBits(bitstream, 16, &temp);\n    tmp_cnt = temp  | 0x1;\n\n    PV_CLZ(leading_zeros, tmp_cnt)\n\n    if (leading_zeros < 8)\n    {\n        *codeNum = (temp >> (15 - (leading_zeros << 1))) - 1;\n        BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1);\n    }\n    else\n    {\n        BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp);\n        *codeNum = temp - 1;\n    }\n\n    return AVCDEC_SUCCESS;\n}\n\n/**\nSee subclause 9.1.1, Table 9-3 */\nAVCDec_Status  se_v(AVCDecBitstream *bitstream, int *value)\n{\n    uint temp, tmp_cnt;\n    int leading_zeros = 0;\n    BitstreamShowBits(bitstream, 16, &temp);\n    tmp_cnt = temp | 0x1;\n\n    PV_CLZ(leading_zeros, tmp_cnt)\n\n    if (leading_zeros < 8)\n    {\n        temp >>= (15 - (leading_zeros << 1));\n        BitstreamFlushBits(bitstream, (leading_zeros << 1) + 1);\n    }\n    else\n    {\n        BitstreamReadBits(bitstream, (leading_zeros << 1) + 1, &temp);\n    }\n\n    *value = temp >> 1;\n\n    if (temp & 0x01)                          // lsb is signed bit\n        *value = -(*value);\n\n//  leading_zeros = temp >> 1;\n//  *value = leading_zeros - (leading_zeros*2*(temp&1));\n\n    return 
AVCDEC_SUCCESS;\n}\n\nAVCDec_Status  se_v32bit(AVCDecBitstream *bitstream, int32 *value)\n{\n    int leadingZeros;\n    uint32 infobits;\n    uint32 codeNum;\n\n    if (AVCDEC_SUCCESS != GetEGBitstring32bit(bitstream, &leadingZeros, &infobits))\n        return AVCDEC_FAIL;\n\n    codeNum = (1 << leadingZeros) - 1 + infobits;\n\n    *value = (codeNum + 1) / 2;\n\n    if ((codeNum & 0x01) == 0)                        // lsb is signed bit\n        *value = -(*value);\n\n    return AVCDEC_SUCCESS;\n}\n\n\nAVCDec_Status te_v(AVCDecBitstream *bitstream, uint *value, uint range)\n{\n    if (range > 1)\n    {\n        ue_v(bitstream, value);\n    }\n    else\n    {\n        BitstreamRead1Bit(bitstream, value);\n        *value = 1 - (*value);\n    }\n    return AVCDEC_SUCCESS;\n}\n\n\n\n/* This function is only used for syntax with range from -2^31 to 2^31-1 */\n/* only a few of them in the SPS and PPS */\nAVCDec_Status GetEGBitstring32bit(AVCDecBitstream *bitstream, int *leadingZeros, uint32 *infobits)\n{\n    int bit_value;\n    uint info_temp;\n\n    *leadingZeros = 0;\n\n    BitstreamRead1Bit(bitstream, (uint*)&bit_value);\n\n    while (!bit_value)\n    {\n        (*leadingZeros)++;\n        BitstreamRead1Bit(bitstream, (uint*)&bit_value);\n    }\n\n    if (*leadingZeros > 0)\n    {\n        if (sizeof(uint) == 4)  /* 32 bit machine */\n        {\n            BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp);\n            *infobits = (uint32)info_temp;\n        }\n        else if (sizeof(uint) == 2) /* 16 bit machine */\n        {\n            *infobits = 0;\n            if (*leadingZeros > 16)\n            {\n                BitstreamReadBits(bitstream, 16, (uint*)&info_temp);\n                (*leadingZeros) -= 16;\n                *infobits = ((uint32)info_temp) << (*leadingZeros);\n            }\n\n            BitstreamReadBits(bitstream, *leadingZeros, (uint*)&info_temp);\n            *infobits |= (uint32)info_temp ;\n        }\n    }\n    else\n       
 *infobits = 0;\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see Table 9-4 assignment of codeNum to values of coded_block_pattern. */\nconst static uint8 MapCBP[48][2] =\n{\n    {47, 0}, {31, 16}, {15, 1}, { 0, 2}, {23, 4}, {27, 8}, {29, 32}, {30, 3}, { 7, 5}, {11, 10}, {13, 12}, {14, 15},\n    {39, 47}, {43, 7}, {45, 11}, {46, 13}, {16, 14}, { 3, 6}, { 5, 9}, {10, 31}, {12, 35}, {19, 37}, {21, 42}, {26, 44},\n    {28, 33}, {35, 34}, {37, 36}, {42, 40}, {44, 39}, { 1, 43}, { 2, 45}, { 4, 46}, { 8, 17}, {17, 18}, {18, 20}, {20, 24},\n    {24, 19}, { 6, 21}, { 9, 26}, {22, 28}, {25, 23}, {32, 27}, {33, 29}, {34, 30}, {36, 22}, {40, 25}, {38, 38}, {41, 41},\n};\n\nAVCDec_Status DecodeCBP(AVCMacroblock *currMB, AVCDecBitstream *stream)\n{\n    uint codeNum;\n    uint coded_block_pattern;\n\n    ue_v(stream, &codeNum);\n\n    if (codeNum > 47)\n    {\n        return AVCDEC_FAIL;\n    }\n\n    /* can get rid of the if _OPTIMIZE */\n    if (currMB->mbMode == AVC_I4)\n    {\n        coded_block_pattern = MapCBP[codeNum][0];\n    }\n    else\n    {\n        coded_block_pattern = MapCBP[codeNum][1];\n    }\n\n//  currMB->cbpL = coded_block_pattern&0xF;  /* modulo 16 */\n//  currMB->cbpC = coded_block_pattern>>4;   /* divide 16 */\n    currMB->CBP = coded_block_pattern;\n\n    return AVCDEC_SUCCESS;\n}\n\n\n/* TO BE OPTIMIZED !!!!! 
*/\nAVCDec_Status ce_TotalCoeffTrailingOnes(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff, int nC)\n{\n#ifdef PV_NO_CLZ\n    const static uint8 TotCofNTrail1[75][3] = {{0, 0, 16}/*error */, {0, 0, 16}/*error */, {1, 13, 15}, {1, 13, 15}, {0, 16, 16}, {2, 16, 16}, {1, 16, 16}, {0, 15, 16},\n        {3, 16, 16}, {2, 15, 16}, {1, 15, 16}, {0, 14, 16}, {3, 15, 16}, {2, 14, 16}, {1, 14, 16}, {0, 13, 16},\n        {3, 14, 15}, {2, 13, 15}, {1, 12, 15}, {0, 12, 15}, {3, 13, 15}, {2, 12, 15}, {1, 11, 15}, {0, 11, 15},\n        {3, 12, 14}, {2, 11, 14}, {1, 10, 14}, {0, 10, 14}, {3, 11, 14}, {2, 10, 14}, {1, 9, 14}, {0, 9, 14},\n        {0, 8, 13}, {2, 9, 13}, {1, 8, 13}, {0, 7, 13}, {3, 10, 13}, {2, 8, 13}, {1, 7, 13}, {0, 6, 13},\n        {3, 9, 11}, {2, 7, 11}, {1, 6, 11}, {0, 5, 11}, {3, 8, 10},\n        {2, 6, 10}, {1, 5, 10}, {0, 4, 10}, {3, 7, 9}, {2, 5, 9}, {1, 4, 9}, {0, 3, 9}, {3, 6, 8},\n        {2, 4, 8}, {1, 3, 8}, {0, 2, 8}, {3, 5, 7}, {2, 3, 7}, {3, 4, 6}, {3, 4, 6}, {1, 2, 6},\n        {1, 2, 6}, {0, 1, 6}, {0, 1, 6}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {3, 3, 5}, {2, 2, 3},\n        {1, 1, 2}, {1, 1, 2}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}, {0, 0, 1}\n    };\n\n    const static uint8 TotCofNTrail2[84][3] = {{0, 0, 14 /* error */}, {0, 0, 14/*error */}, {3, 15, 13}, {3, 15, 13}, {3, 16, 14}, {2, 16, 14}, {1, 16, 14}, {0, 16, 14},\n        {1, 15, 14}, {0, 15, 14}, {2, 15, 14}, {1, 14, 14}, {2, 14, 13}, {2, 14, 13}, {0, 14, 13}, {0, 14, 13},\n        {3, 14, 13}, {2, 13, 13}, {1, 13, 13}, {0, 13, 13}, {3, 13, 13}, {2, 12, 13}, {1, 12, 13}, {0, 12, 13},\n        {0, 11, 12}, {2, 11, 12}, {1, 11, 12}, {0, 10, 12}, {3, 12, 12}, {2, 10, 12}, {1, 10, 12}, {0, 9, 12},\n        {3, 11, 11}, {2, 9, 11}, {1, 9, 11}, {0, 8, 11}, {3, 10, 11}, {2, 8, 11}, {1, 8, 11}, {0, 7, 11},\n        {3, 9, 9}, {2, 7, 9}, {1, 7, 9}, {0, 6, 9}, {0, 5, 8}, {0, 5, 8}, {2, 6, 8}, {2, 6, 8},\n        {1, 6, 8}, {1, 6, 8}, {0, 4, 8}, {0, 4, 8}, {3, 8, 7}, {2, 5, 7}, {1, 5, 7}, {0, 
3, 7},\n        {3, 7, 6}, {3, 7, 6}, {2, 4, 6}, {2, 4, 6}, {1, 4, 6}, {1, 4, 6}, {0, 2, 6}, {0, 2, 6},\n        {3, 6, 6}, {2, 3, 6}, {1, 3, 6}, {0, 1, 6}, {3, 5, 5}, {3, 5, 5}, {1, 2, 5}, {1, 2, 5},\n        {3, 4, 4}, {3, 3, 4}, {2, 2, 3}, {2, 2, 3}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2}, {1, 1, 2},\n        {0, 0, 2}, {0, 0, 2}, {0, 0, 2}, {0, 0, 2}\n    };\n\n    const static uint8 TotCofNTrail3[64][3] = {{0, 0, 10/*error*/}, {0, 16, 10}, {3, 16, 10}, {2, 16, 10}, {1, 16, 10}, {0, 15, 10}, {3, 15, 10},\n        {2, 15, 10}, {1, 15, 10}, {0, 14, 10}, {3, 14, 10}, {2, 14, 10}, {1, 14, 10}, {0, 13, 10}, {1, 13, 9},\n        {1, 13, 9}, {0, 12, 9}, {2, 13, 9}, {1, 12, 9}, {0, 11, 9}, {3, 13, 9}, {2, 12, 9}, {1, 11, 9},\n        {0, 10, 9}, {3, 12, 8}, {2, 11, 8}, {1, 10, 8}, {0, 9, 8}, {3, 11, 8}, {2, 10, 8}, {1, 9, 8},\n        {0, 8, 8}, {0, 7, 7}, {0, 6, 7}, {2, 9, 7}, {0, 5, 7}, {3, 10, 7}, {2, 8, 7}, {1, 8, 7},\n        {0, 4, 7}, {0, 3, 6}, {2, 7, 6}, {1, 7, 6}, {0, 2, 6}, {3, 9, 6}, {2, 6, 6}, {1, 6, 6},\n        {0, 1, 6}, {1, 5, 5}, {2, 5, 5}, {1, 4, 5}, {2, 4, 5}, {1, 3, 5}, {3, 8, 5}, {2, 3, 5},\n        {1, 2, 5}, {3, 7, 4}, {3, 6, 4}, {3, 5, 4}, {3, 4, 4}, {3, 3, 4}, {2, 2, 4}, {1, 1, 4},\n        {0, 0, 4}\n    };\n#endif\n    uint code;\n\n#ifdef PV_NO_CLZ\n    uint8 *pcode;\n    if (nC < 2)\n    {\n        BitstreamShowBits(stream, 16, &code);\n\n        if (code >= 8192)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>13)+65+2][0]);\n        }\n        else if (code >= 2048)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>9)+50+2][0]);\n        }\n        else if (code >= 1024)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>8)+46+2][0]);\n        }\n        else if (code >= 512)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>7)+42+2][0]);\n        }\n        else if (code >= 256)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>6)+38+2][0]);\n        }\n        else if 
(code >= 128)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>5)+34+2][0]);\n        }\n        else if (code >= 64)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>3)+22+2][0]);\n        }\n        else if (code >= 32)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>2)+14+2][0]);\n        }\n        else if (code >= 16)\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code>>1)+6+2][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotCofNTrail1[(code-2)+2][0]);\n        }\n\n        *TrailingOnes = pcode[0];\n        *TotalCoeff = pcode[1];\n\n        BitstreamFlushBits(stream, pcode[2]);\n    }\n    else if (nC < 4)\n    {\n        BitstreamShowBits(stream, 14, &code);\n\n        if (code >= 4096)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>10)+66+2][0]);\n        }\n        else if (code >= 2048)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>8)+54+2][0]);\n        }\n        else if (code >= 512)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>7)+46+2][0]);\n        }\n        else if (code >= 128)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>5)+34+2][0]);\n        }\n        else if (code >= 64)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>3)+22+2][0]);\n        }\n        else if (code >= 32)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>2)+14+2][0]);\n        }\n        else if (code >= 16)\n        {\n            pcode = (uint8*) & (TotCofNTrail2[(code>>1)+6+2][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotCofNTrail2[code-2+2][0]);\n        }\n        *TrailingOnes = pcode[0];\n        *TotalCoeff = pcode[1];\n\n        BitstreamFlushBits(stream, pcode[2]);\n    }\n    else if (nC < 8)\n    {\n        BitstreamShowBits(stream, 10, &code);\n\n        if (code >= 512)\n        {\n            pcode = (uint8*) & 
(TotCofNTrail3[(code>>6)+47+1][0]);\n        }\n        else if (code >= 256)\n        {\n            pcode = (uint8*) & (TotCofNTrail3[(code>>5)+39+1][0]);\n        }\n        else if (code >= 128)\n        {\n            pcode = (uint8*) & (TotCofNTrail3[(code>>4)+31+1][0]);\n        }\n        else if (code >= 64)\n        {\n            pcode = (uint8*) & (TotCofNTrail3[(code>>3)+23+1][0]);\n        }\n        else if (code >= 32)\n        {\n            pcode = (uint8*) & (TotCofNTrail3[(code>>2)+15+1][0]);\n        }\n        else if (code >= 16)\n        {\n            pcode = (uint8*) & (TotCofNTrail3[(code>>1)+7+1][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotCofNTrail3[code-1+1][0]);\n        }\n        *TrailingOnes = pcode[0];\n        *TotalCoeff = pcode[1];\n\n        BitstreamFlushBits(stream, pcode[2]);\n    }\n    else\n    {\n        /* read 6 bit FLC */\n        BitstreamReadBits(stream, 6, &code);\n\n\n        *TrailingOnes = code & 3;\n        *TotalCoeff = (code >> 2) + 1;\n\n        if (*TotalCoeff > 16)\n        {\n            *TotalCoeff = 16;  // _ERROR\n        }\n\n        if (code == 3)\n        {\n            *TrailingOnes = 0;\n            (*TotalCoeff)--;\n        }\n    }\n#else\n    const VLCNumCoeffTrail *ptr;\n    const ShiftOffset *ptr_indx;\n    uint temp, leading_zeros = 0;\n\n    if (nC < 8)\n    {\n\n        BitstreamShowBits(stream, 16, &code);\n        temp = code | 1;\n\n        PV_CLZ(leading_zeros, temp)\n\n        temp = nC_table[nC];\n        ptr_indx = &NumCoeffTrailOnes_indx[temp][leading_zeros];\n        ptr = &NumCoeffTrailOnes[temp][(code >> ptr_indx->shift) + ptr_indx->offset];\n        *TrailingOnes = ptr->trailing;\n        *TotalCoeff = ptr->total_coeff;\n        BitstreamFlushBits(stream, ptr->length);\n    }\n    else\n    {\n        /* read 6 bit FLC */\n        BitstreamReadBits(stream, 6, &code);\n\n\n        *TrailingOnes = code & 3;\n        *TotalCoeff = (code >> 2) + 
1;\n\n        if (*TotalCoeff > 16)\n        {\n            *TotalCoeff = 16;  // _ERROR\n        }\n\n        if (code == 3)\n        {\n            *TrailingOnes = 0;\n            (*TotalCoeff)--;\n        }\n    }\n#endif\n    return AVCDEC_SUCCESS;\n}\n\n/* TO BE OPTIMIZED !!!!! */\nAVCDec_Status ce_TotalCoeffTrailingOnesChromaDC(AVCDecBitstream *stream, int *TrailingOnes, int *TotalCoeff)\n{\n    AVCDec_Status status;\n\n    const static uint8 TotCofNTrail5[21][3] =\n    {\n        {3, 4, 7}, {3, 4, 7}, {2, 4, 8}, {1, 4, 8}, {2, 3, 7}, {2, 3, 7}, {1, 3, 7},\n        {1, 3, 7}, {0, 4, 6}, {0, 3, 6}, {0, 2, 6}, {3, 3, 6}, {1, 2, 6}, {0, 1, 6},\n        {2, 2, 3}, {0, 0, 2}, {0, 0, 2}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}, {1, 1, 1}\n    };\n\n    uint code;\n    uint8 *pcode;\n\n    status = BitstreamShowBits(stream, 8, &code);\n\n    if (code >= 32)\n    {\n        pcode = (uint8*) & (TotCofNTrail5[(code>>5)+13][0]);\n    }\n    else if (code >= 8)\n    {\n        pcode = (uint8*) & (TotCofNTrail5[(code>>2)+6][0]);\n    }\n    else\n    {\n        pcode = (uint8*) & (TotCofNTrail5[code][0]);\n    }\n\n    *TrailingOnes = pcode[0];\n    *TotalCoeff = pcode[1];\n\n    BitstreamFlushBits(stream, pcode[2]);\n\n    return status;\n}\n\n/* see Table 9-6 */\nAVCDec_Status ce_LevelPrefix(AVCDecBitstream *stream, uint *code)\n{\n    uint temp;\n    uint leading_zeros = 0;\n    BitstreamShowBits(stream, 16, &temp);\n    temp |= 1 ;\n\n    PV_CLZ(leading_zeros, temp)\n\n    BitstreamFlushBits(stream, leading_zeros + 1);\n    *code = leading_zeros;\n    return AVCDEC_SUCCESS;\n}\n\n/* see Table 9-7 and 9-8 */\nAVCDec_Status ce_TotalZeros(AVCDecBitstream *stream, int *code, int TotalCoeff)\n{\n    const static uint8 TotZero1[28][2] = {{15, 9}, {14, 9}, {13, 9}, {12, 8},\n        {12, 8}, {11, 8}, {11, 8}, {10, 7}, {9, 7}, {8, 6}, {8, 6}, {7, 6}, {7, 6}, {6, 5}, {6, 5},\n        {6, 5}, {6, 5}, {5, 5}, {5, 5}, {5, 5}, {5, 5}, {4, 4}, {3, 4},\n        {2, 3}, {2, 3}, {1, 3}, {1, 
3}, {0, 1}\n    };\n\n    const static uint8 TotZero2n3[2][18][2] = {{{14, 6}, {13, 6}, {12, 6}, {11, 6},\n            {10, 5}, {10, 5}, {9, 5}, {9, 5}, {8, 4}, {7, 4}, {6, 4}, {5, 4}, {4, 3}, {4, 3},\n            {3, 3}, {2, 3}, {1, 3}, {0, 3}},\n\n        /*const static uint8 TotZero3[18][2]=*/{{13, 6}, {11, 6}, {12, 5}, {12, 5}, {10, 5},\n            {10, 5}, {9, 5}, {9, 5}, {8, 4}, {5, 4}, {4, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {3, 3},\n            {2, 3}, {1, 3}}\n    };\n\n    const static uint8 TotZero4[17][2] = {{12, 5}, {11, 5}, {10, 5}, {0, 5}, {9, 4},\n        {9, 4}, {7, 4}, {7, 4}, {3, 4}, {3, 4}, {2, 4}, {2, 4}, {8, 3}, {6, 3}, {5, 3}, {4, 3}, {1, 3}\n    };\n\n    const static uint8 TotZero5[13][2] = {{11, 5}, {9, 5}, {10, 4}, {8, 4}, {2, 4},\n        {1, 4}, {0, 4}, {7, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3}\n    };\n\n    const static uint8 TotZero6to10[5][15][2] = {{{10, 6}, {0, 6}, {1, 5}, {1, 5}, {8, 4},\n            {8, 4}, {8, 4}, {8, 4}, {9, 3}, {7, 3}, {6, 3}, {5, 3}, {4, 3}, {3, 3}, {2, 3}},\n\n        /*const static uint8 TotZero7[15][2]=*/{{9, 6}, {0, 6}, {1, 5}, {1, 5}, {7, 4},\n            {7, 4}, {7, 4}, {7, 4}, {8, 3}, {6, 3}, {4, 3}, {3, 3}, {2, 3}, {5, 2}, {5, 2}},\n\n        /*const static uint8 TotZero8[15][2]=*/{{8, 6}, {0, 6}, {2, 5}, {2, 5}, {1, 4},\n            {1, 4}, {1, 4}, {1, 4}, {7, 3}, {6, 3}, {3, 3}, {5, 2}, {5, 2}, {4, 2}, {4, 2}},\n\n        /*const static uint8 TotZero9[15][2]=*/{{1, 6}, {0, 6}, {7, 5}, {7, 5}, {2, 4},\n            {2, 4}, {2, 4}, {2, 4}, {5, 3}, {6, 2}, {6, 2}, {4, 2}, {4, 2}, {3, 2}, {3, 2}},\n\n        /*const static uint8 TotZero10[11][2]=*/{{1, 5}, {0, 5}, {6, 4}, {6, 4}, {2, 3},\n            {2, 3}, {2, 3}, {2, 3}, {5, 2}, {4, 2}, {3, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}\n    };\n\n    const static uint8 TotZero11[7][2] = {{0, 4}, {1, 4}, {2, 3}, {2, 3}, {3, 3}, {5, 3}, {4, 1}};\n\n    const static uint8 TotZero12to15[4][5][2] =\n    {\n        {{3, 1}, {2, 2}, {4, 3}, {1, 4}, {0, 4}},\n   
     {{2, 1}, {3, 2}, {1, 3}, {0, 3}, {0, 0}},\n        {{2, 1}, {1, 2}, {0, 2}, {0, 0}, {0, 0}},\n        {{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}}\n    };\n\n    uint temp, mask;\n    int indx;\n    uint8 *pcode;\n\n    if (TotalCoeff == 1)\n    {\n        BitstreamShowBits(stream, 9, &temp);\n\n        if (temp >= 256)\n        {\n            pcode = (uint8*) & (TotZero1[27][0]);\n        }\n        else if (temp >= 64)\n        {\n            pcode = (uint8*) & (TotZero1[(temp>>5)+19][0]);\n        }\n        else if (temp >= 8)\n        {\n            pcode = (uint8*) & (TotZero1[(temp>>2)+5][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero1[temp-1][0]);\n        }\n\n    }\n    else if (TotalCoeff == 2 || TotalCoeff == 3)\n    {\n        BitstreamShowBits(stream, 6, &temp);\n\n        if (temp >= 32)\n        {\n            pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>3)+10][0]);\n        }\n        else if (temp >= 8)\n        {\n            pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][(temp>>2)+6][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero2n3[TotalCoeff-2][temp][0]);\n        }\n    }\n    else if (TotalCoeff == 4)\n    {\n        BitstreamShowBits(stream, 5, &temp);\n\n        if (temp >= 12)\n        {\n            pcode = (uint8*) & (TotZero4[(temp>>2)+9][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero4[temp][0]);\n        }\n    }\n    else if (TotalCoeff == 5)\n    {\n        BitstreamShowBits(stream, 5, &temp);\n\n        if (temp >= 16)\n        {\n            pcode = (uint8*) & (TotZero5[(temp>>2)+5][0]);\n        }\n        else if (temp >= 2)\n        {\n            pcode = (uint8*) & (TotZero5[(temp>>1)+1][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero5[temp][0]);\n        }\n    }\n    else if (TotalCoeff >= 6 && TotalCoeff <= 10)\n    {\n        if (TotalCoeff == 10)\n        {\n            
BitstreamShowBits(stream, 5, &temp);\n        }\n        else\n        {\n            BitstreamShowBits(stream, 6, &temp);\n        }\n\n\n        if (temp >= 8)\n        {\n            pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][(temp>>3)+7][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero6to10[TotalCoeff-6][temp][0]);\n        }\n    }\n    else if (TotalCoeff == 11)\n    {\n        BitstreamShowBits(stream, 4, &temp);\n\n\n        if (temp >= 8)\n        {\n            pcode = (uint8*) & (TotZero11[6][0]);\n        }\n        else if (temp >= 4)\n        {\n            pcode = (uint8*) & (TotZero11[(temp>>1)+2][0]);\n        }\n        else\n        {\n            pcode = (uint8*) & (TotZero11[temp][0]);\n        }\n    }\n    else\n    {\n        BitstreamShowBits(stream, (16 - TotalCoeff), &temp);\n        mask = 1 << (15 - TotalCoeff);\n        indx = 0;\n        while ((temp&mask) == 0 && indx < (16 - TotalCoeff)) /* search location of 1 bit */\n        {\n            mask >>= 1;\n            indx++;\n        }\n\n        pcode = (uint8*) & (TotZero12to15[TotalCoeff-12][indx]);\n    }\n\n    *code = pcode[0];\n    BitstreamFlushBits(stream, pcode[1]);\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see Table 9-9 */\nAVCDec_Status ce_TotalZerosChromaDC(AVCDecBitstream *stream, int *code, int TotalCoeff)\n{\n    const static uint8 TotZeroChrom1to3[3][8][2] =\n    {\n        {{3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},\n        {{2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},\n        {{1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}},\n    };\n\n\n    uint temp;\n    uint8 *pcode;\n\n    BitstreamShowBits(stream, 3, &temp);\n    pcode = (uint8*) & (TotZeroChrom1to3[TotalCoeff-1][temp]);\n\n    *code = pcode[0];\n\n    BitstreamFlushBits(stream, pcode[1]);\n\n    return AVCDEC_SUCCESS;\n}\n\n/* see Table 9-10 */\nAVCDec_Status ce_RunBefore(AVCDecBitstream *stream, int *code, int 
zerosLeft)\n{\n    const static int codlen[6] = {1, 2, 2, 3, 3, 3}; /* num bits to read */\n    const static uint8 RunBeforeTab[6][8][2] = {{{1, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},\n        /*const static int RunBefore2[4][2]=*/{{2, 2}, {1, 2}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},\n        /*const static int RunBefore3[4][2]=*/{{3, 2}, {2, 2}, {1, 2}, {0, 2}, {0, 0}, {0, 0}, {0, 0}, {0, 0}},\n        /*const static int RunBefore4[7][2]=*/{{4, 3}, {3, 3}, {2, 2}, {2, 2}, {1, 2}, {1, 2}, {0, 2}, {0, 2}},\n        /*const static int RunBefore5[7][2]=*/{{5, 3}, {4, 3}, {3, 3}, {2, 3}, {1, 2}, {1, 2}, {0, 2}, {0, 2}},\n        /*const static int RunBefore6[7][2]=*/{{1, 3}, {2, 3}, {4, 3}, {3, 3}, {6, 3}, {5, 3}, {0, 2}, {0, 2}}\n    };\n\n    uint temp;\n    uint8 *pcode;\n    int indx;\n\n    if (zerosLeft <= 6)\n    {\n        BitstreamShowBits(stream, codlen[zerosLeft-1], &temp);\n\n        pcode = (uint8*) & (RunBeforeTab[zerosLeft-1][temp][0]);\n\n        *code = pcode[0];\n\n        BitstreamFlushBits(stream, pcode[1]);\n    }\n    else\n    {\n        BitstreamReadBits(stream, 3, &temp);\n        if (temp)\n        {\n            *code = 7 - temp;\n        }\n        else\n        {\n            BitstreamShowBits(stream, 9, &temp);\n            temp <<= 7;\n            temp |= 1;\n            indx = 0;\n            PV_CLZ(indx, temp)\n            *code = 7 + indx;\n            BitstreamFlushBits(stream, indx + 1);\n        }\n    }\n\n\n    return AVCDEC_SUCCESS;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/yuv2rgb.cpp",
    "content": "/*\n * yuv2rgb.cpp\n *\n *  Created on: 29 juil. 2009\n *      Author: rglt1266\n */\n#include <stdio.h>\n#include \"yuv2rgb.h\"\n\nvoid convert (int width,int height, uint8 *in,uint32 *out){\n\tuint8 *pY;\n\tuint8 *pU;\n\tuint8 *pV;\n\tint Y,U,V;\n\tint i,j;\n\tint R,G,B,Cr,Cb;\n\n\t/* Init */\n\tpY = in;\n\tpU = in + (width*height);\n\tpV = pU + (width*height/4);\n\n\tfor(i=0;i<height;i++){\n\t\tfor(j=0;j<width;j++){\n\t\t\t/* YUV values uint */\n\t\t\tY=*((pY)+ (i*width) + j);\n\t\t\tU=*( pU + (j/2) + ((width/2)*(i/2)));\n\t\t\tV=*( pV + (j/2) + ((width/2)*(i/2)));\n\t\t\t/* RBG values */\n\t\t\tCr = V-128;\n\t\t\tCb = U-128;\n\t\t\tR = Y + ((359*Cr)>>8);\n\t\t\tG = Y - ((88*Cb+183*Cr)>>8);\n\t\t\tB = Y + ((454*Cb)>>8);\n\t\t\tif (R>255)R=255; else if (R<0)R=0;\n\t\t\tif (G>255)G=255; else if (G<0)G=0;\n\t\t\tif (B>255)B=255; else if (B<0)B=0;\n\n\t\t\t/* Write data */\n\t\t\tout[((i*width) + j)]=((((R & 0xFF) << 16) | ((G & 0xFF) << 8) | (B & 0xFF))& 0xFFFFFFFF);\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/dec/src/yuv2rgb.h",
    "content": "/*\n * yuv2rgb.h\n *\n *  Created on: 29 juil. 2009\n *      Author: rglt1266\n */\n\n#include \"oscl_types.h\"\n\n#ifndef YUV2RGB_H_\n#define YUV2RGB_H_\n\nvoid convert (int width,int height, uint8 *in,uint32 *out);\n\n#endif /* YUV2RGB_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/Android.mk",
    "content": "#\n# Copyright (C) 2008 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This makefile supplies the rules for building a library of JNI code for\n# use by our example platform shared library.\n\nLOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_MODULE_TAGS := optional\n\n# This is the target being built.\nLOCAL_MODULE:= libH264Encoder\n\n# All of the source files that we will compile.\nLOCAL_SRC_FILES:= \\\n\tsrc/avcenc_api.cpp \\\n\tsrc/bitstream_io.cpp \\\n\tsrc/block.cpp \\\n\tsrc/findhalfpel.cpp \\\n\tsrc/header.cpp \\\n\tsrc/init.cpp \\\n\tsrc/intra_est.cpp \\\n\tsrc/motion_comp.cpp \\\n\tsrc/motion_est.cpp \\\n\tsrc/rate_control.cpp \\\n\tsrc/residual.cpp \\\n\tsrc/sad.cpp \\\n\tsrc/sad_halfpel.cpp \\\n\tsrc/slice.cpp \\\n\tsrc/vlc_encode.cpp \\\n\tsrc/NativeH264Encoder.cpp \\\n\tsrc/pvavcencoder.cpp \\\n\t../common/src/mb_access.cpp \\\n\t../common/src/reflist.cpp \\\n\t../common/src/fmo.cpp \\\n\t../common/src/deblock.cpp \\\n\t../common/src/dpb.cpp\n\n\n# All of the shared libraries we link against.\nLOCAL_SHARED_LIBRARIES := \n\n# No static libraries.\nLOCAL_STATIC_LIBRARIES :=\n\n# Also need the JNI headers.\nLOCAL_C_INCLUDES += \\\n\t$(JNI_H_INCLUDE)\\\n\t$(LOCAL_PATH)/src \\\n \t$(LOCAL_PATH)/include \\\n\t$(AVC_ROOT)/oscl \\\n\t$(AVC_ROOT)/common/include\n\n# No specia compiler flags.\nLOCAL_CFLAGS +=\n\n# Link libs (ex logs)\nLOCAL_LDLIBS := -llog\n\n# Don't prelink this library.  
For more efficient code, you may want\n# to add this library to the prelink map and set this to true.\nLOCAL_PRELINK_MODULE := false\n\ninclude $(BUILD_SHARED_LIBRARY) \n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/include/pvavcencoder.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCENCODER_H_INCLUDED\n#define PVAVCENCODER_H_INCLUDED\n\n#ifndef PVAVCENCODERINTERFACE_H_INCLUDED\n#include \"pvavcencoderinterface.h\"\n#endif\n\n#ifndef AVCENC_API_H_INCLUDED\n#include \"avcenc_api.h\"\n#endif\n\n/** AVC encoder class interface. See PVAVCEncoderInterface APIs for\nvirtual functions definitions. 
*/\nclass PVAVCEncoder : public PVAVCEncoderInterface\n{\n\n    public:\n        OSCL_IMPORT_REF static PVAVCEncoder* New(void);\n        OSCL_IMPORT_REF virtual ~PVAVCEncoder();\n\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL Initialize(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam);\n        OSCL_IMPORT_REF virtual int32 GetMaxOutputBufferSize();\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL Encode(TAVCEIInputData* aVidIn);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL GetParameterSet(uint8* paramSet, int32* size, int* nalType);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL GetOutput(TAVCEIOutputData* aVidOut, int *aRemainingBytes);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL FlushInput();\n        virtual TAVCEI_RETVAL CleanupEncoder();\n\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateBitRate(int32* aBitRate);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateFrameRate(OsclFloat* aFrameRate);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL UpdateIDRFrameInterval(int32 aIDRFrameInterval);\n        OSCL_IMPORT_REF virtual TAVCEI_RETVAL IDRRequest();\n\n        OSCL_IMPORT_REF virtual int32 GetEncodeWidth(int32 aLayer);\n        OSCL_IMPORT_REF virtual int32 GetEncodeHeight(int32 aLayer);\n        OSCL_IMPORT_REF virtual OsclFloat GetEncodeFrameRate(int32 aLayer);\n\n        /* for avc encoder lib callback functions */\n        int     AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers);\n        int     AVC_FrameBind(int indx, uint8** yuv);\n        void    AVC_FrameUnbind(int indx);\n\n    private:\n\n        PVAVCEncoder();\n        bool Construct(void);\n        TAVCEI_RETVAL Init(TAVCEIInputFormat *aVidInFormat, TAVCEIEncodeParam *aEncParam, AVCEncParams& aEncOption);\n\n        void CopyToYUVIn(uint8* YUV, int width, int height);\n\n        AVCProfile  mapProfile(TAVCEIProfile in);\n        AVCLevel    mapLevel(TAVCEILevel out);\n\n        /* internal enum */\n        enum TAVCEncState\n        {\n            ECreated,\n      
      EInitialized,\n            EEncoding\n        };\n\n        TAVCEncState    iState;\n        uint32      iId;\n\n        /* Pure virtuals from OsclActiveObject implemented in this derived class */\n        int     iSrcWidth;\n        int     iSrcHeight;\n        int     iFrameOrientation;\n        OsclFloat       iSrcFrameRate;\n        int     iEncWidth;\n        int     iEncHeight;\n        OsclFloat   iEncFrameRate;\n        TAVCEIVideoFormat   iVideoFormat;\n\n        /* variables needed in operation */\n        AVCHandle iAvcHandle;\n        AVCFrameIO iVidIn;\n        uint8*  iYUVIn;\n        uint8*  iVideoIn;\n        uint8*  iVideoOut;\n        uint32  iTimeStamp;\n        uint32  iPacketSize;\n        uint8*  iOverrunBuffer;\n        int     iOBSize;\n        AVCEnc_Status iEncStatus;\n        bool    iIDR;\n        int     iDispOrd;\n\n        uint8*  iDPB;\n        bool*   iFrameUsed;\n        uint8** iFramePtr;\n        int     iNumFrames;\n\n        /* Tables in color coversion */\n        uint8 * iY_Table;\n        uint16* iCb_Table;\n        uint16* iCr_Table;\n        uint16* ipCb_Table;\n        uint16* ipCr_Table;\n\n\n        int     iNumLayer;\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/include/pvavcencoder_factory.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCENCODER_FACTORY_H_INCLUDED\n#define PVAVCENCODER_FACTORY_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_MEM_H_INCLUDED\n#include \"oscl_mem.h\"\n#endif\n\nclass PVAVCEncoderInterface;\n\nclass PVAVCEncoderFactory\n{\n    public:\n        /**\n         * Creates an instance of a PVAVCDecoder. If the creation fails, this function will leave.\n         *\n         * @returns A pointer to an instance of PVAVCDecoder as PVAVCDecoderInterface reference or leaves if instantiation fails\n         **/\n        OSCL_IMPORT_REF static PVAVCEncoderInterface* CreatePVAVCEncoder();\n\n        /**\n         * Deletes an instance of PVAVCDecoder and reclaims all allocated resources.\n         *\n         * @param aVideoDec The PVAVCDecoder instance to be deleted\n         * @returns A status code indicating success or failure of deletion\n         **/\n        OSCL_IMPORT_REF static bool DeletePVAVCEncoder(PVAVCEncoderInterface* aVideoEnc);\n};\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/include/pvavcencoderinterface.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVAVCENCODERINTERFACE_H_INCLUDED\n#define PVAVCENCODERINTERFACE_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#define AVC_MAX_LAYER 1\n\n/** General returned values. */\nenum TAVCEI_RETVAL\n{\n    EAVCEI_SUCCESS,\n    EAVCEI_FAIL,    // upon receiving fail, encoder should be reset\n\n    /** Encode return values */\n    EAVCEI_FRAME_DROP,    // current frame is dropped, send in a new frame with new timestamp\n    EAVCEI_NOT_READY,  // the previous frame is still being processed\n    EAVCEI_INPUT_ERROR, // error with input buffers\n\n    /** GetOutput return values */\n    EAVCEI_MORE_DATA,  // there are more data to be retrieve (multiple fragments of a NAL)\n    EAVCEI_MORE_NAL     // there is more NAL to be retrieved\n\n} ;\n\n/** Contains supported input format */\nenum TAVCEIVideoFormat\n{\n    EAVCEI_VDOFMT_RGB24,\n    EAVCEI_VDOFMT_RGB12,\n    EAVCEI_VDOFMT_YUV420,\n    EAVCEI_VDOFMT_UYVY,\n    EAVCEI_VDOFMT_YUV420SEMIPLANAR\n};\n\n/** Type of contents for optimal encoding mode. 
*/\nenum TAVCEIEncodingMode\n{\n    /** Content is encoded as fast as possible with error protection */\n    EAVCEI_ENCMODE_TWOWAY,\n\n    /** Content is encoded as fast as possible without error protection */\n    EAVCEI_ENCMODE_RECORDER,\n\n    /** Content is encoded with better quality (slow) with error protection */\n    EAVCEI_ENCMODE_STREAMING,\n\n    /** Content is encoded with better quality (slow) without error protection */\n    EAVCEI_ENCMODE_DOWNLOAD\n};\n\n/** Rate control type. */\nenum TAVCEIRateControlType\n{\n    /** Constant quality, variable bit rate, fixed quantization level. */\n    EAVCEI_RC_CONSTANT_Q,\n\n    /** Short-term constant bit rate control. */\n    EAVCEI_RC_CBR_1,\n\n    /** Long-term constant bit rate control. */\n    EAVCEI_RC_VBR_1\n};\n\n/** Targeted profile to encode. */\nenum TAVCEIProfile\n{\n    /* Non-scalable profile */\n    EAVCEI_PROFILE_DEFAULT,\n    EAVCEI_PROFILE_BASELINE,\n    EAVCEI_PROFILE_MAIN,\n    EAVCEI_PROFILE_EXTENDED,\n    EAVCEI_PROFILE_HIGH,\n    EAVCEI_PROFILE_HIGH10,\n    EAVCEI_PROFILE_HIGH422,\n    EAVCEI_PROFILE_HIGH444\n};\n\n/** Targeted level to encode. */\nenum TAVCEILevel\n{\n    EAVCEI_LEVEL_AUTODETECT,\n    EAVCEI_LEVEL_1,\n    EAVCEI_LEVEL_1B,\n    EAVCEI_LEVEL_11,\n    EAVCEI_LEVEL_12,\n    EAVCEI_LEVEL_13,\n    EAVCEI_LEVEL_2,\n    EAVCEI_LEVEL_21,\n    EAVCEI_LEVEL_22,\n    EAVCEI_LEVEL_3,\n    EAVCEI_LEVEL_31,\n    EAVCEI_LEVEL_32,\n    EAVCEI_LEVEL_4,\n    EAVCEI_LEVEL_41,\n    EAVCEI_LEVEL_42,\n    EAVCEI_LEVEL_5,\n    EAVCEI_LEVEL_51,\n};\n\n/** Output format */\nenum TAVCEIOutputFormat\n{\n    /** output in byte stream format according to Annex B */\n    EAVCEI_OUTPUT_ANNEXB,\n\n    /** output for MP4 file format */\n    EAVCEI_OUTPUT_MP4,\n\n    /** output in RTP format according to RFC 3984 */\n    EAVCEI_OUTPUT_RTP\n};\n\n\n/** This structure contains encoder settings. 
*/\nstruct TAVCEIEncodeParam\n{\n    /** Specifies an  ID that will be used to specify this encoder while returning\n    the bitstream in asynchronous mode. */\n    uint32              iEncodeID;\n\n    /** Specifies the targeted profile, and will also specifies available tools for iEncMode.\n    If default is used, encoder will choose its own preferred profile. If autodetect is used, encoder\n    will check other settings and choose the right profile that doesn't have any conflicts. */\n    TAVCEIProfile       iProfile;\n\n    /** Specifies the target level  When present,\n    other settings will be checked against the range allowable by this target level.\n    Fail will returned upon Initialize call. If not known, users must set it to autodetect. Encoder will\n    calculate the right level that doesn't conflict with other settings. */\n    TAVCEILevel         iLevel;\n\n    /** Specifies whether base only (iNumLayer = 1) or base + enhancement layer\n    (iNumLayer =2 ) is to be used. */\n    int32               iNumLayer;\n\n    /** Specifies the width in pixels of the encoded frames. IFrameWidth[0] is for\n    base layer and iFrameWidth[1] is for enhanced layer. */\n    int                 iFrameWidth[AVC_MAX_LAYER];\n\n    /** Specifies the height in pixels of the encoded frames. IFrameHeight[0] is for\n    base layer and iFrameHeight[1] is for enhanced layer. */\n    int                 iFrameHeight[AVC_MAX_LAYER];\n\n    /** Specifies the cumulative bit rate in bit per second. IBitRate[0] is for base\n    layer and iBitRate[1] is for base+enhanced layer.*/\n    int                 iBitRate[AVC_MAX_LAYER];\n\n    /** Specifies the cumulative frame rate in frame per second. IFrameRate[0] is for\n    base layer and iFrameRate[1] is for base+enhanced layer. */\n    OsclFloat               iFrameRate[AVC_MAX_LAYER];\n\n    /** Specifies the encoding mode. 
This translates to the complexity of encoding modes and\n    error resilient tools.\n    */\n    TAVCEIEncodingMode  iEncMode;\n\n    /** Specifies that SPS and PPS are retrieved first and sent out-of-band */\n    bool                iOutOfBandParamSet;\n\n    /** Specifies the desired output format. */\n    TAVCEIOutputFormat  iOutputFormat;\n\n    /** Specifies the packet size in bytes which represents the desired number of bytes per NAL.\n    If this number is set to 0, the encoder will encode the entire slice group as one NAL. */\n    uint32              iPacketSize;\n\n    /** Specifies the rate control algorithm among one of the following constant Q,\n    CBR and VBR. .*/\n    TAVCEIRateControlType iRateControlType;\n\n    /** Specifies the VBV buffer size which determines the end-to-end delay between the\n    encoder and the decoder.  The size is in unit of seconds. For download application,\n    the buffer size can be larger than the streaming application. For 2-way application,\n    this buffer shall be kept minimal. For a special case, in VBR mode, iBufferDelay will\n    be set to -1 to allow buffer underflow. */\n    float               iBufferDelay;\n\n    /** Specifies the initial quantization parameter for the first I-frame. If constant Q\n    rate control is used, this QP will be used for all the I-frames. This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iIquant[AVC_MAX_LAYER];\n\n    /** Specifies the initial quantization parameter for the first P-frame. If constant Q\n    rate control is used, this QP will be used for all the P-frames. This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iPquant[AVC_MAX_LAYER];\n\n    /** Specifies the initial quantization parameter for the first B-frame. If constant Q\n    rate control is used, this QP will be used for all the B-frames. 
This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iBquant[AVC_MAX_LAYER];\n\n    /** Specifies automatic scene detection where I-frame will be used the the first frame\n    in a new scene. */\n    bool                iSceneDetection;\n\n    /** Specifies the maximum period in seconds between 2 INTRA frames. An INTRA mode is\n    forced to a frame once this interval is reached. When there is only one I-frame is present\n    at the beginning of the clip, iIFrameInterval should be set to -1. For all I-frames coding\n    this number should be set to 0. */\n    int32               iIFrameInterval;\n\n    /** According to iIFrameInterval setting, the minimum number of intra MB per frame is\n    optimally calculated for error resiliency. However, when iIFrameInterval is set to -1,\n    iNumIntraMBRefresh must be specified to guarantee the minimum number of intra\n    macroblocks per frame.*/\n    uint32              iNumIntraMBRefresh;\n\n\n    /** Specifies the duration of the clip in millisecond, needed for VBR encode. Set to 0 if unknown.*/\n    int32               iClipDuration;\n\n    /** Specify FSI Buffer input */\n    uint8*              iFSIBuff;\n\n    /** Specify FSI Buffer Length */\n    int                 iFSIBuffLength;\n\n};\n\n\n/** Structure for input format information */\nstruct TAVCEIInputFormat\n{\n    /** Contains the width in pixels of the input frame. */\n    int32           iFrameWidth;\n\n    /** Contains the height in pixels of the input frame. */\n    int32           iFrameHeight;\n\n    /** Contains the input frame rate in the unit of frame per second. */\n    OsclFloat           iFrameRate;\n\n    /** Contains Frame Orientation. Used for RGB input. 1 means Bottom_UP RGB, 0 means Top_Down RGB, -1 for video formats other than RGB*/\n    int             iFrameOrientation;\n\n    /** Contains the format of the input video, e.g., YUV 4:2:0, UYVY, RGB24, etc. 
*/\n    TAVCEIVideoFormat   iVideoFormat;\n};\n\n\n/** Contains the input data information */\nstruct TAVCEIInputData\n{\n    /** Pointer to an input frame buffer in input source format.*/\n    uint8*      iSource;\n\n    /** The corresponding time stamp of the input frame. */\n    uint32      iTimeStamp;\n};\n\n/** Contains the output data information */\nstruct TAVCEIOutputData\n{\n    /** Pointer to the encoded bitstream buffer. */\n    uint8*          iBitstream;\n\n    /** The size in bytes of iBStream. */\n    int32           iBitstreamSize;\n\n    /** The time stamp of the encoded frame according to the bitstream. */\n    uint32          iTimeStamp;\n\n    /** Set to true if this is a fragment of a NAL */\n    bool            iFragment;\n\n    /** Set to true if this is the last fragment of a NAL*/\n    bool            iLastFragment;\n\n    /** Set to true if this is a key frame */\n    bool            iKeyFrame;\n\n    /** Set to true if this is the last NAL of a frame */\n    bool            iLastNAL;\n\n    /** Pointer to the reconstructed frame buffer in YUV 4:2:0 domain. */\n    uint8           *iFrame;\n};\n\n\n/** \\brief  This class is the base class for codec specific interface class.\nThe users must maintain an instance of the codec specific class throughout\nthe encoding session.\n*/\nclass PVAVCEncoderInterface\n{\n    public:\n        /** \\brief Constructor for PVAVCEncoderInterface class. */\n        virtual ~PVAVCEncoderInterface() {};\n\n        /** \\brief Initialization function to set the input video format and the\n        encoding parameters.\n        \\parm  aVidInFormat contains input related attributes.\n        \\parm  aEncParam contains encoding parameters setting.\n        \\return  fail if there is any errors. 
Otherwise, the function returns success.*/\n        virtual  TAVCEI_RETVAL Initialize(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam) = 0;\n\n\n        /** \\brief Get suggested output buffer size to be allocated such that no frames are dropped.\n        \\return  Size to be allocated. 0 means the encoder is not initialized. */\n        virtual  int32 GetMaxOutputBufferSize() = 0;\n\n        /** \\brief This function sends in an input video data structure containing a source\n        frame and the associated timestamp. It can start processing such as frame analysis, decision to\n        drop or encode.\n        \\parm  aVidIn contains one frame and other information of input.\n        \\return one of these, SUCCESS, FRAME_DROP, NOT_READY, INPUT_ERROR, FAIL\n        */\n        virtual  TAVCEI_RETVAL Encode(TAVCEIInputData* aVidIn) = 0;\n\n        /** \\brief This function returns an array of parameter sets (either SPS or PPS, as specified by NAL TYPE\n        in the first byte.\n        \\parm paramSet contains buffer for parameters sets.\n        \\parm size is for size of the input/output.\n        \\parm nalType is the NAL type according to the standard.\n        \\return one of these, SUCCESS, INPUT_ERROR, FAIL\n        */\n        virtual  TAVCEI_RETVAL GetParameterSet(uint8* paramSet, int32* size, int *nalType) = 0;\n\n        /** \\brief This function returns a compressed bitstream.\n        \\parm   aVidOut is the structure to contain the output information.\n        \\return one of these, SUCCESS, MORE_DATA, NOT_READY, INPUT_ERROR, FAIL */\n        virtual  TAVCEI_RETVAL GetOutput(TAVCEIOutputData* aVidOut, int *aRemainingBytes) = 0;\n\n        /** This function is used to flush all the unencoded frames store inside the encoder (if there exist).\n        It is used for random re-positioning. Or free all the input. 
Note that if users want to flush output\n        also, it has to retrieve all the output by calling GetOutput.\n        \\return  SUCCESS or NOT_READY (if the current frame is being used). */\n        virtual  TAVCEI_RETVAL FlushInput() = 0;\n\n        /** This function cleanup the AVCEI allocated resources.\n        \\return  SUCCESS or FAIL. If fail, exception should be thrown. */\n        virtual  TAVCEI_RETVAL CleanupEncoder() = 0;\n\n        /**This function dynamically changes the target bit rate of the encoder\n        while encoding. aBitRate[n] is the new accumulate target bit rate of layer n.\n        \\parm aBitRate is an array of the new target bit rates, size of array is the number of layers.\n        \\return SUCCESS, INPUT_ERROR or FAIL (if values are invalid) */\n        virtual  TAVCEI_RETVAL UpdateBitRate(int32* aBitRate) = 0;\n\n        /** This function dynamically changes the target frame rate of the encoder\n        while encoding.\n        \\parm aFrameRate is an array of new accumulate target frame rate\n        \\return SUCCESS, INPUT_ERROR or FAIL (if values are invalid) */\n        virtual  TAVCEI_RETVAL UpdateFrameRate(OsclFloat* aFrameRate) = 0;\n\n        /** This function dynamically changes the IDR frame update interval while\n        encoding to a new value.\n        \\parm aIFrameInterval is a new value of the IDR-frame interval in millisecond.\n        \\return SUCCESS or FAIL (if the value is invalid). */\n        virtual  TAVCEI_RETVAL UpdateIDRFrameInterval(int32 aIDRFrameInterval) = 0;\n\n        /** This function forces an IDR mode to the next frame to be encoded.\n        \\return  none. */\n        virtual  TAVCEI_RETVAL IDRRequest() = 0;\n\n        /** This function returns the input width of a specific layer\n        (not necessarily multiple of 16).\n        \\param aLayer specifies the layer of interest\n        \\return  width in pixels. 
*/\n        virtual  int32 GetEncodeWidth(int32 aLayer) = 0;\n\n        /** This function returns the input height of a specific layer\n        (not necessarily multiple of 16).\n        \\param aLayer specifies the layer of interest\n        \\return  height in pixels. */\n        virtual  int32 GetEncodeHeight(int32 aLayer) = 0;\n\n        /** This function returns the target encoded frame rate of a specific layer.\n        \\param aLayer specifies the layer of interest\n        \\return  frame rate in fps. */\n\n        virtual  OsclFloat GetEncodeFrameRate(int32 aLayer) = 0;\n\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 2009 OrangeLabs\n *\n * Author: Alexis Gilabert Senar\n * Date: 2009-07-01\n * -------------------------------------------------------------------\n */\n#define LOG_TAG \"NativeEnc\"\n#include \"NativeH264Encoder.h\"\n#include \"pvavcencoder.h\"\n#include \"android/log.h\"\n\nint     iSrcWidth;\nint     iSrcHeight;\nfloat   iSrcFrameRate;\nint        FrameSize;\n//int        NalComplete = 0;\n// xxx pa\nint        NalComplete = 1;\n\nint SkipNextEncoding = 0;\n\n/* variables needed in operation */\nPVAVCEncoder                *encoder;\nTAVCEIInputFormat        *iInputFormat;\nTAVCEIEncodeParam        *iEncodeParam;\nTAVCEIInputData                *iInData;\nTAVCEIOutputData        *iOutData;\nTAVCEI_RETVAL                status;\n\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    InitEncoder\n * Signature: (IIF)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder\n  (JNIEnv *env, jclass iclass, jint width, jint height, jint framerate)\n{\n    /**\n      * Init\n      */\n    iSrcWidth = width;\n    iSrcHeight = height;\n    iSrcFrameRate = framerate;\n    FrameSize = (iSrcWidth*iSrcHeight*3)>>1;\n\n    encoder = PVAVCEncoder::New();\n    if (encoder==NULL) return 0;\n    \n    iInputFormat = (TAVCEIInputFormat*)malloc(sizeof(TAVCEIInputFormat));\n    if (iInputFormat==NULL) {\n      delete(encoder);\n      return 0;\n    }\n\n    iEncodeParam = (TAVCEIEncodeParam*)malloc(sizeof(TAVCEIEncodeParam));\n    if (iEncodeParam==NULL) {\n      free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n\n    iInData = (TAVCEIInputData*)malloc(sizeof(TAVCEIInputData));\n    if(iInData==NULL){\n      free(iEncodeParam);\n      free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n\n    iOutData = 
(TAVCEIOutputData*)malloc(sizeof(TAVCEIOutputData));\n    if(iOutData==NULL){\n      free(iInData);\n      free(iEncodeParam);\n      free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n    iOutData->iBitstream = (uint8*)malloc(FrameSize);\n    iOutData->iBitstreamSize = FrameSize;\n\n    /**\n      * Set Encoder params\n      */\n    iInputFormat->iFrameWidth = width;\n    iInputFormat->iFrameHeight = height;\n    iInputFormat->iFrameRate = (OsclFloat)(framerate);\n    iInputFormat->iFrameOrientation = -1;\n    iInputFormat->iVideoFormat = EAVCEI_VDOFMT_YUV420SEMIPLANAR;\n\n\n    iEncodeParam->iEncodeID = 0;\n    iEncodeParam->iProfile = EAVCEI_PROFILE_BASELINE;\n\n    // xxx pa switch level due to changed screen size and bandwidth\n    //    iEncodeParam->iLevel = EAVCEI_LEVEL_1B;\n    iEncodeParam->iLevel = EAVCEI_LEVEL_12;\n    \n    iEncodeParam->iNumLayer = 1;\n    iEncodeParam->iFrameWidth[0] = iInputFormat->iFrameWidth;\n    iEncodeParam->iFrameHeight[0] = iInputFormat->iFrameHeight;\n    iEncodeParam->iBitRate[0] = 64000;\n    iEncodeParam->iFrameRate[0] = (OsclFloat)iInputFormat->iFrameRate;\n    iEncodeParam->iEncMode = EAVCEI_ENCMODE_TWOWAY;\n    // iEncodeParam->iOutOfBandParamSet = true;\n    \n    // xxx pa 120503 set to in-band parameter to trigger SPS and PPS with each IFrame\n    iEncodeParam->iOutOfBandParamSet = false;\n    \n    iEncodeParam->iOutputFormat = EAVCEI_OUTPUT_RTP;\n    iEncodeParam->iPacketSize = 8192;\n    iEncodeParam->iRateControlType = EAVCEI_RC_CBR_1;\n    iEncodeParam->iBufferDelay = (OsclFloat)2.0;\n    iEncodeParam->iIquant[0]=15;\n    iEncodeParam->iPquant[0]=12;\n    iEncodeParam->iBquant[0]=0;\n    iEncodeParam->iSceneDetection = false;\n    // iEncodeParam->iIFrameInterval = 15;\n    // xxx pa 120503 set shorten IFrame intervall for more often SPS/PPS NAL units\n    iEncodeParam->iIFrameInterval = 2;\n    iEncodeParam->iNumIntraMBRefresh = 50;\n    iEncodeParam->iClipDuration = 0;\n    
iEncodeParam->iFSIBuff = NULL;\n    iEncodeParam->iFSIBuffLength = 0;\n\n    /**\n      * Init encoder\n      */\n    return encoder->Initialize(iInputFormat,iEncodeParam);\n\n}\n\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    EncodeFrame\n * Signature: ([BJ)[B\n */\nJNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame\n  (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp)\n{\n    jbyteArray result ;\n\n    /**\n      * Check NAL\n      */\n    if (NalComplete == 0){\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Checking NAL\");\n      int32 NalSize = 30;\n      int NalType = 0;\n      uint8* NalBuff = (uint8*)malloc(NalSize*sizeof(uint8));\n      if(encoder->GetParameterSet(NalBuff,&NalSize,&NalType)== EAVCEI_SUCCESS){\n        result=(env)->NewByteArray(NalSize);\n        (env)->SetByteArrayRegion(result, 0, NalSize, (jbyte*)NalBuff);\n        free(NalBuff);\n\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Checking NAL GetParameterSet->EAVCEI_SUCCESS status? 
%d\", status);\n\n      return result;\n      } else {\n        NalComplete = 1; // Now encode video\n      }\n    }\n\n   \n    // only encode if not in MORE_NAL state\n    jint len = env->GetArrayLength(frame);\n    uint8* data = (uint8*)malloc(len);\n    \n    if (SkipNextEncoding == 0){\n    \n        /**\n          * EncodeFrame\n          */\n        env->GetByteArrayRegion (frame, (jint)0, (jint)len, (jbyte*)data);\n\n        iInData->iSource=(uint8*)data;\n        iInData->iTimeStamp = timestamp;\n        \n        // ==============>\n        status = encoder->Encode(iInData);\n        \n        if(status != EAVCEI_SUCCESS){\n          __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode fail with code: %d\",status);\n          result=(env)->NewByteArray(0);\n          free(data);\n          return result;\n        }\n    } else {\n        /**\n        * xxx pa skipped encoding due to MORE NAL output signal\n        */\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Skiped encoding\");\n    }\n    \n    int remainingByte = 0;\n    iOutData->iBitstreamSize = FrameSize;\n    \n    // ==============>\n    status = encoder->GetOutput(iOutData,&remainingByte);\n\n    \n    if(!(status == EAVCEI_SUCCESS || status == EAVCEI_MORE_NAL)){\n//    if(status != EAVCEI_SUCCESS){\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Get output fail with code: %d\",status);\n      result=(env)->NewByteArray(0);\n      free(data);\n      return result;\n    }\n\n    // try to get more NAL\n    if(status == EAVCEI_MORE_NAL){\n        SkipNextEncoding = 1;\n    } else {\n        // reset flag\n        SkipNextEncoding = 0;\n    }\n    \n    // Copy aOutBuffer into result\n    result=(env)->NewByteArray(iOutData->iBitstreamSize);\n    (env)->SetByteArrayRegion(result, 0, iOutData->iBitstreamSize, (jbyte*)iOutData->iBitstream);\n    free(data);\n    return result;\n\n}\n\n/*\n * Class:     
com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    getLastEncodeStatus\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus\n  (JNIEnv *env, jclass clazz){\n    return status;\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    DeinitEncoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder\n  (JNIEnv *env, jclass clazz){\n    delete(encoder);\n    free(iInputFormat);\n    free(iEncodeParam);\n    free(iInData);\n    free(iOutData);\n    NalComplete = 0;\n    return 1;\n}\n\n/*\n * This is called by the VM when the shared library is first loaded.\n */\njint JNI_OnLoad(JavaVM* vm, void* reserved) {\n    JNIEnv* env = NULL;\n    jint result = -1;\n\n    if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {\n        goto bail;\n    }\n\n    /* success -- return valid version number */\n    result = JNI_VERSION_1_4;\n\nbail:\n    return result;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.cpp__orig",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 2009 OrangeLabs\n *\n * Author: Alexis Gilabert Senar\n * Date: 2009-07-01\n * -------------------------------------------------------------------\n */\n#define LOG_TAG \"NativeEnc\"\n#include \"NativeH264Encoder.h\"\n#include \"pvavcencoder.h\"\n#include \"android/log.h\"\n\nint     iSrcWidth;\nint     iSrcHeight;\nfloat   iSrcFrameRate;\nint\tFrameSize;\nint\tNalComplete = 0;\n\n/* variables needed in operation */\nPVAVCEncoder\t\t*encoder;\nTAVCEIInputFormat\t*iInputFormat;\nTAVCEIEncodeParam\t*iEncodeParam;\nTAVCEIInputData\t\t*iInData;\nTAVCEIOutputData\t*iOutData;\nTAVCEI_RETVAL\t\tstatus;\n\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    InitEncoder\n * Signature: (IIF)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder\n  (JNIEnv *env, jclass iclass, jint width, jint height, jint framerate)\n{\n    /**\n      * Init\n      */\n    iSrcWidth = width;\n    iSrcHeight = height;\n    iSrcFrameRate = framerate;\n    FrameSize = (iSrcWidth*iSrcHeight*3)>>1;\n\n    encoder = PVAVCEncoder::New();\n    if (encoder==NULL) return 0;\n    \n    iInputFormat = (TAVCEIInputFormat*)malloc(sizeof(TAVCEIInputFormat));\n    if (iInputFormat==NULL) {\n      delete(encoder);\n      return 0;\n    }\n\n    iEncodeParam = (TAVCEIEncodeParam*)malloc(sizeof(TAVCEIEncodeParam));\n    if (iEncodeParam==NULL) {\n      free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n\n    iInData = (TAVCEIInputData*)malloc(sizeof(TAVCEIInputData));\n    if(iInData==NULL){\n      free(iEncodeParam);\n      free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n\n    iOutData = (TAVCEIOutputData*)malloc(sizeof(TAVCEIOutputData));\n    if(iOutData==NULL){\n      free(iInData);\n      free(iEncodeParam);\n      
free(iInputFormat);\n      delete(encoder);\n      return 0;\n    }\n    iOutData->iBitstream = (uint8*)malloc(FrameSize);\n    iOutData->iBitstreamSize = FrameSize;\n\n    /**\n      * Set Encoder params\n      */\n    iInputFormat->iFrameWidth = width;\n    iInputFormat->iFrameHeight = height;\n    iInputFormat->iFrameRate = (OsclFloat)(framerate);\n    iInputFormat->iFrameOrientation = -1;\n    iInputFormat->iVideoFormat = EAVCEI_VDOFMT_YUV420SEMIPLANAR;\n\n\n    iEncodeParam->iEncodeID = 0;\n    iEncodeParam->iProfile = EAVCEI_PROFILE_BASELINE;\n    iEncodeParam->iLevel = EAVCEI_LEVEL_1B;\n    iEncodeParam->iNumLayer = 1;\n    iEncodeParam->iFrameWidth[0] = iInputFormat->iFrameWidth;\n    iEncodeParam->iFrameHeight[0] = iInputFormat->iFrameHeight;\n    iEncodeParam->iBitRate[0] = 64000;\n    iEncodeParam->iFrameRate[0] = (OsclFloat)iInputFormat->iFrameRate;\n    iEncodeParam->iEncMode = EAVCEI_ENCMODE_TWOWAY;\n    iEncodeParam->iOutOfBandParamSet = true;\n    iEncodeParam->iOutputFormat = EAVCEI_OUTPUT_RTP;\n    iEncodeParam->iPacketSize = 8192;\n    iEncodeParam->iRateControlType = EAVCEI_RC_CBR_1;\n    iEncodeParam->iBufferDelay = (OsclFloat)2.0;\n    iEncodeParam->iIquant[0]=15;\n    iEncodeParam->iPquant[0]=12;\n    iEncodeParam->iBquant[0]=0;\n    iEncodeParam->iSceneDetection = false;\n    iEncodeParam->iIFrameInterval = 15;\n    iEncodeParam->iNumIntraMBRefresh = 50;\n    iEncodeParam->iClipDuration = 0;\n    iEncodeParam->iFSIBuff = NULL;\n    iEncodeParam->iFSIBuffLength = 0;\n\n    /**\n      * Init encoder\n      */\n    return encoder->Initialize(iInputFormat,iEncodeParam);\n\n}\n\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    EncodeFrame\n * Signature: ([BJ)[B\n */\nJNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame\n  (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp)\n{\n    
jbyteArray result ;\n\n    /**\n      * Check NAL\n      */\n    if (NalComplete == 0){\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Checking NAL\");\n      int32 NalSize = 30;\n      int NalType = 0;\n      uint8* NalBuff = (uint8*)malloc(NalSize*sizeof(uint8));\n      if(encoder->GetParameterSet(NalBuff,&NalSize,&NalType)== EAVCEI_SUCCESS){\n\tresult=(env)->NewByteArray(NalSize);\n\t(env)->SetByteArrayRegion(result, 0, NalSize, (jbyte*)NalBuff);\n\tfree(NalBuff);\n\treturn result;\n      } else {\n\tNalComplete = 1; // Now encode video\n      }\n    }\n\n    /**\n      * EncodeFrame\n      */\n    jint len = env->GetArrayLength(frame);\n    uint8* data = (uint8*)malloc(len);\n    env->GetByteArrayRegion (frame, (jint)0, (jint)len, (jbyte*)data);\n\n    iInData->iSource=(uint8*)data;\n    iInData->iTimeStamp = timestamp;\n    status = encoder->Encode(iInData);\n    if(status != EAVCEI_SUCCESS){\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode fail with code: %d\",status);\n      result=(env)->NewByteArray(0);\n      free(data);\n      return result;\n    }\n\n    int remainingByte = 0;\n    iOutData->iBitstreamSize = FrameSize;\n    status = encoder->GetOutput(iOutData,&remainingByte);\n    if(status != EAVCEI_SUCCESS){\n      __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Get output fail with code: %d\",status);\n      result=(env)->NewByteArray(0);\n      free(data);\n      return result;\n    }\n\n    // Copy aOutBuffer into result\n    result=(env)->NewByteArray(iOutData->iBitstreamSize);\n    (env)->SetByteArrayRegion(result, 0, iOutData->iBitstreamSize, (jbyte*)iOutData->iBitstream);\n    free(data);\n    return result;\n\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    getLastEncodeStatus\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus\n  (JNIEnv 
*env, jclass clazz){\n    return status;\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    DeinitEncoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder\n  (JNIEnv *env, jclass clazz){\n    delete(encoder);\n    free(iInputFormat);\n    free(iEncodeParam);\n    free(iInData);\n    free(iOutData);\n    NalComplete = 0;\n    return 1;\n}\n\n/*\n * This is called by the VM when the shared library is first loaded.\n */\njint JNI_OnLoad(JavaVM* vm, void* reserved) {\n    JNIEnv* env = NULL;\n    jint result = -1;\n\n    if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {\n        goto bail;\n    }\n\n    /* success -- return valid version number */\n    result = JNI_VERSION_1_4;\n\nbail:\n    return result;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/NativeH264Encoder.h",
    "content": "/* DO NOT EDIT THIS FILE - it is machine generated */\n#include <jni.h>\n/* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder */\n\n#ifndef _Included_NativeH264Encoder\n#define _Included_NativeH264Encoder\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    InitEncoder\n * Signature: (IIF)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_InitEncoder\n  (JNIEnv *, jclass, jint, jint, jint);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    EncodeFrame\n * Signature: ([BJ)[B\n */\nJNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_EncodeFrame\n  (JNIEnv *, jclass, jbyteArray, jlong);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    DeinitEncoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_DeinitEncoder\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder\n * Method:    getLastEncodeStatus\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h264_encoder_NativeH264Encoder_getLastEncodeStatus\n  (JNIEnv *env, jclass clazz);\n\n#ifdef __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/avcenc_api.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_types.h\"\n#include \"oscl_mem.h\"\n#include \"avcenc_api.h\"\n#include \"avcenc_lib.h\"\n\n// xxx pa\n#define LOG_TAG \"avenc_api\"\n#include \"android/log.h\"\n\n/* ======================================================================== */\n/*  Function : PVAVCGetNALType()                                            */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Sniff NAL type from the bitstream                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail.              
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetNALType(unsigned char *bitstream, int size,\n        int *nal_type, int *nal_ref_idc)\n{\n    int forbidden_zero_bit;\n    if (size > 0)\n    {\n        forbidden_zero_bit = bitstream[0] >> 7;\n        if (forbidden_zero_bit != 0)\n            return AVCENC_FAIL;\n        *nal_ref_idc = (bitstream[0] & 0x60) >> 5;\n        *nal_type = bitstream[0] & 0x1F;\n        return AVCENC_SUCCESS;\n    }\n\n    return AVCENC_FAIL;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncGetProfileLevel()                                            */\n/*  Date     : 3/4/2010                                                    */\n/*  Purpose  : Get profile and level type from the bitstream                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS if succeed, AVCENC_FAIL if fail.              
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetProfileLevel(AVCHandle* avcHandle, AVCProfile* profile, AVCLevel* level)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCSeqParamSet *seqParam = video->currSeqParams;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    *profile = (AVCProfile)seqParam->profile_idc;\n    *level = (AVCLevel)seqParam->level_idc;\n\n    return AVCENC_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVAVCEncInitialize()                                         */\n/*  Date     : 3/18/2004                                                    */\n/*  Purpose  : Initialize the encoder library, allocate memory and verify   */\n/*              the profile/level support/settings.                         */\n/*  In/out   : Encoding parameters.                                         */\n/*  Return   : AVCENC_SUCCESS for success.                                  
*/
/*  Modified :                                                              */
/* ======================================================================== */
/* Allocates and zero-initializes every sub-structure of the encoder
   (AVCEncObject, AVCCommonObj, bitstream, SPS, PPS, slice header, DPB,
   rate control, per-MB arrays, motion/rate-control modules, function
   pointers), then applies encParam via SetEncodeParam().
   NOTE(review): on any allocation failure this returns AVCENC_MEMORY_FAIL
   with the partially-built object still attached to avcHandle->AVCObject;
   presumably the caller is expected to invoke PVAVCCleanUpEncoder() to
   release it — confirm against callers. */
OSCL_EXPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam,
        void* extSPS, void* extPPS)
{

    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize");

    AVCEnc_Status status;
    AVCEncObject *encvid;
    AVCCommonObj *video;
    uint32 *userData = (uint32*) avcHandle->userData;
    int framesize; /* number of macroblocks per frame (set below) */

    if (avcHandle->AVCObject != NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (AVCObject != NULL) -> return: AVCENC_ALREADY_INITIALIZED");

        return AVCENC_ALREADY_INITIALIZED; /* It's already initialized, need to cleanup first */
    }

    /* not initialized */

    /* allocate videoObject */
    avcHandle->AVCObject = (void*)avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncObject), DEFAULT_ATTR);
    if (avcHandle->AVCObject == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (AVCObject == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }

    encvid = (AVCEncObject*) avcHandle->AVCObject;
    oscl_memset(encvid, 0, sizeof(AVCEncObject)); /* reset everything */

    encvid->enc_state = AVCEnc_Initializing;

    /* back-pointer so sub-modules can reach the callbacks */
    encvid->avcHandle = avcHandle;

    encvid->common = (AVCCommonObj*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCCommonObj), DEFAULT_ATTR);
    if (encvid->common == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (encvid->common == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }

    video = encvid->common;
    oscl_memset(video, 0, sizeof(AVCCommonObj));

    /* allocate bitstream structure */
    encvid->bitstream = (AVCEncBitstream*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncBitstream), DEFAULT_ATTR);
    if (encvid->bitstream == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (encvid->bitstream == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    encvid->bitstream->encvid = encvid; /* to point back for reallocation */

    /* allocate sequence parameter set structure */
    video->currSeqParams = (AVCSeqParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSeqParamSet), DEFAULT_ATTR);
    if (video->currSeqParams == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (video->currSeqParams == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(video->currSeqParams, 0, sizeof(AVCSeqParamSet));

    /* allocate picture parameter set structure */
    video->currPicParams = (AVCPicParamSet*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCPicParamSet), DEFAULT_ATTR);
    if (video->currPicParams == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (video->currPicParams == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(video->currPicParams, 0, sizeof(AVCPicParamSet));

    /* allocate slice header structure */
    video->sliceHdr = (AVCSliceHeader*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCSliceHeader), DEFAULT_ATTR);
    if (video->sliceHdr == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (video->sliceHdr == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(video->sliceHdr, 0, sizeof(AVCSliceHeader));

    /* allocate encoded picture buffer structure*/
    video->decPicBuf = (AVCDecPicBuffer*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCDecPicBuffer), DEFAULT_ATTR);
    if (video->decPicBuf == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (video->decPicBuf == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(video->decPicBuf, 0, sizeof(AVCDecPicBuffer));

    /* allocate rate control structure */
    encvid->rateCtrl = (AVCRateControl*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCRateControl), DEFAULT_ATTR);
    if (encvid->rateCtrl == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (encvid->rateCtrl == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(encvid->rateCtrl, 0, sizeof(AVCRateControl));

    /* reset frame list, not really needed */
    video->currPic = NULL;
    video->currFS = NULL;
    encvid->currInput = NULL;
    video->prevRefPic = NULL;

    /* now read encParams, and allocate dimension-dependent variables */
    /* such as mblock */
    status = SetEncodeParam(avcHandle, encParam, extSPS, extPPS); /* initialized variables to be used in SPS*/
    if (status != AVCENC_SUCCESS)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (SetEncodeParam() != AVCENC_SUCCESS) -> return: status: %d", status);

        return status;
    }

    if (encParam->use_overrun_buffer == AVC_ON)
    {
        /* allocate overrun buffer */
        /* sized from the rate-control CPB, clamped to a default maximum */
        encvid->oBSize = encvid->rateCtrl->cpbSize;
        if (encvid->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE)
        {
            encvid->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;
        }
        encvid->overrunBuffer = (uint8*) avcHandle->CBAVC_Malloc(userData, encvid->oBSize, DEFAULT_ATTR);
        if (encvid->overrunBuffer == NULL)
        {
            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (encvid->overrunBuffer == NULL) -> return: AVCENC_MEMORY_FAIL");

            return AVCENC_MEMORY_FAIL;
        }
    }
    else
    {
        encvid->oBSize = 0;
        encvid->overrunBuffer = NULL;
    }

    /* allocate frame size dependent structures */
    framesize = video->FrameHeightInMbs * video->PicWidthInMbs;

    video->mblock = (AVCMacroblock*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMacroblock) * framesize, DEFAULT_ATTR);
    if (video->mblock == NULL)
    {
        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  "PVAVCEncInitialize (video->mblock == NULL) -> return: AVCENC_MEMORY_FAIL");

        return AVCENC_MEMORY_FAIL;
    }

    video->MbToSliceGroupMap = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits * 2, DEFAULT_ATTR);
    if (video->MbToSliceGroupMap == NULL)
    {
        return AVCENC_MEMORY_FAIL;
    }

    /* per-MB motion vectors for 16x16 search */
    encvid->mot16x16 = (AVCMV*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCMV) * framesize, DEFAULT_ATTR);
    if (encvid->mot16x16 == NULL)
    {
        return AVCENC_MEMORY_FAIL;
    }
    oscl_memset(encvid->mot16x16, 0, sizeof(AVCMV)*framesize);

    encvid->intraSearch = (uint8*) avcHandle->CBAVC_Malloc(userData, sizeof(uint8) * framesize, DEFAULT_ATTR);
    if (encvid->intraSearch == NULL)
    {
        return AVCENC_MEMORY_FAIL;
    }

    encvid->min_cost = (int*) avcHandle->CBAVC_Malloc(userData, sizeof(int) * framesize, DEFAULT_ATTR);
    if (encvid->min_cost == NULL)
    {
        return AVCENC_MEMORY_FAIL;
    }

    /* initialize motion search related memory */
    if (AVCENC_SUCCESS != InitMotionSearchModule(avcHandle))
    {
        return AVCENC_MEMORY_FAIL;
    }

    if (AVCENC_SUCCESS != InitRateControlModule(avcHandle))
    {
        return AVCENC_MEMORY_FAIL;
    }

    /* intialize function pointers */
    /* C reference implementations of the SAD kernels; platforms may
       override these with optimized versions */
    encvid->functionPointer = (AVCEncFuncPtr*) avcHandle->CBAVC_Malloc(userData, sizeof(AVCEncFuncPtr), DEFAULT_ATTR);
    if (encvid->functionPointer == NULL)
    {
        return AVCENC_MEMORY_FAIL;
    }
    encvid->functionPointer->SAD_Macroblock = &AVCSAD_Macroblock_C;
    encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;
    encvid->functionPointer->SAD_MB_HalfPel[1] = &AVCSAD_MB_HalfPel_Cxh;
    encvid->functionPointer->SAD_MB_HalfPel[2] = &AVCSAD_MB_HalfPel_Cyh;
    encvid->functionPointer->SAD_MB_HalfPel[3] = &AVCSAD_MB_HalfPel_Cxhyh;

    /* initialize timing control */
    encvid->modTimeRef = 0;     /* ALWAYS ASSUME THAT TIMESTAMP START FROM 0 !!!*/
    video->prevFrameNum = 0;
    encvid->prevCodedFrameNum = 0;
    encvid->dispOrdPOCRef = 0;

    /* With out-of-band parameter sets the first NAL to emit is the SPS;
       otherwise go straight to frame analysis (local modification, see
       the "xxx pa" marker). */
    if (encvid->outOfBandParamSet == TRUE)
    {
        encvid->enc_state = AVCEnc_Encoding_SPS;
    }
    else
    {
        // xxx pa
        encvid->enc_state = AVCEnc_Analyzing_Frame;
        //encvid->enc_state = AVCEnc_Encoding_Frame;
    }

    return AVCENC_SUCCESS;
}

/* ======================================================================== */
/*  Function : PVAVCEncGetMaxOutputSize()                                   */
/*  Date     : 11/29/2008                                                   */
/*  Purpose  : Return max output buffer size that apps should allocate for  */
/*              output buffer.                                              */
/*  In/out   :                                                              */
/*  Return   : AVCENC_SUCCESS for success.                                  
*/\n/*  Modified :   size                                                       */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    *size = encvid->rateCtrl->cpbSize;\n\n    return AVCENC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncSetInput()                                           */\n/*  Date     : 4/18/2004                                                    */\n/*  Purpose  : To feed an unencoded original frame to the encoder library.  */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success.                                  */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input)\n{\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput\");\n\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n\n    AVCEnc_Status status;\n    uint frameNum;\n\n    if (encvid == NULL)\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput return: AVCENC_UNINITIALIZED\");\n\n        return AVCENC_UNINITIALIZED;\n    }\n\n    if (encvid->enc_state == AVCEnc_WaitingForBuffer)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput goto: RECALL_INITFRAME\");\n        goto RECALL_INITFRAME;\n    }\n    else if (encvid->enc_state != AVCEnc_Analyzing_Frame)\n    {\n        
__android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput return: AVCENC_FAIL\");\n\n        return AVCENC_FAIL;\n    }\n\n    if (input->pitch > 0xFFFF)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput return: AVCENC_NOT_SUPPORTED\");\n        return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch\n    }\n\n    /***********************************/\n\n    /* Let's rate control decide whether to encode this frame or not */\n    /* Also set video->nal_unit_type, sliceHdr->slice_type, video->slice_type */\n    if (AVCENC_SUCCESS != RCDetermineFrameNum(encvid, rateCtrl, input->coding_timestamp, &frameNum))\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput return: AVCENC_SKIPPED_PICTURE\");\n\n        return AVCENC_SKIPPED_PICTURE; /* not time to encode, thus skipping */\n    }\n\n    /* we may not need this line */\n    //nextFrmModTime = (uint32)((((frameNum+1)*1000)/rateCtrl->frame_rate) + modTimeRef); /* rec. time */\n    //encvid->nextModTime = nextFrmModTime - (encvid->frameInterval>>1) - 1; /* between current and next frame */\n\n    encvid->currInput = input;\n    encvid->currInput->coding_order = frameNum;\n\nRECALL_INITFRAME:\n    /* initialize and analyze the frame */\n    status = InitFrame(encvid);\n\n    if (status == AVCENC_SUCCESS)\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput AVCENC_SUCCESS -> enc_state = AVCEnc_Encoding_Frame\");\n\n        encvid->enc_state = AVCEnc_Encoding_Frame;\n    }\n    else if (status == AVCENC_NEW_IDR)\n    {\n        if (encvid->outOfBandParamSet == TRUE)\n        {\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput AVCENC_NEW_IDR -> enc_state = AVCEnc_Encoding_Frame\");\n\n            encvid->enc_state = AVCEnc_Encoding_Frame;\n        }\n        else // assuming that in-band paramset keeps sending new SPS and PPS.\n        {\n            __android_log_print(ANDROID_LOG_INFO, 
LOG_TAG,  \"PVAVCEncSetInput AVCENC_NEW_IDR -> enc_state = AVCEnc_Encoding_SPS\");\n\n            encvid->enc_state = AVCEnc_Encoding_SPS;\n            //video->currSeqParams->seq_parameter_set_id++;\n            //if(video->currSeqParams->seq_parameter_set_id > 31) // range check\n            {\n                video->currSeqParams->seq_parameter_set_id = 0;  // reset\n            }\n        }\n\n        video->sliceHdr->idr_pic_id++;\n        if (video->sliceHdr->idr_pic_id > 65535) // range check\n        {\n            video->sliceHdr->idr_pic_id = 0;  // reset\n        }\n    }\n    /* the following logics need to be revisited */\n    else if (status == AVCENC_PICTURE_READY) // no buffers returned back to the encoder\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput AVCENC_PICTURE_READY -> enc_state = AVCEnc_WaitingForBuffer\");\n\n        encvid->enc_state = AVCEnc_WaitingForBuffer; // Input accepted but can't continue\n        // need to free up some memory before proceeding with Encode\n    }\n    \n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncSetInput return: final status\");\n\n    return status; // return status, including the AVCENC_FAIL case and all 3 above.\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncodeNAL()                                             */\n/*  Date     : 4/29/2004                                                    */\n/*  Purpose  : To encode one NAL/slice.                                     */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success.                                  
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, unsigned char *buffer, unsigned int *buf_nal_size, int *nal_type)\n{\n\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCEncBitstream *bitstream = encvid->bitstream;\n    AVCEnc_Status status;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL: %d\",encvid->enc_state);\n    \n    switch (encvid->enc_state)\n    {\n        case AVCEnc_Initializing:\n            return AVCENC_UNINITIALIZED;\n        case AVCEnc_Encoding_SPS:\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL: AVCEnc_Encoding_SPS\");\n            \n            /* initialized the structure */\n            BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);\n            BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_SPS);\n\n            /* encode SPS */\n            status = EncodeSPS(encvid, bitstream);\n            if (status != AVCENC_SUCCESS)\n            {\n                return status;\n            }\n\n            /* closing the NAL with trailing bits */\n            status = BitstreamTrailingBits(bitstream, buf_nal_size);\n            if (status == AVCENC_SUCCESS)\n            {\n                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL AVCEnc_Encoding_SPS -> enc_state = AVCEnc_Encoding_PPS\");\n\n                encvid->enc_state = AVCEnc_Encoding_PPS;\n                video->currPicParams->seq_parameter_set_id = video->currSeqParams->seq_parameter_set_id;\n                video->currPicParams->pic_parameter_set_id++;\n                *nal_type = AVC_NALTYPE_SPS;\n                *buf_nal_size = bitstream->write_pos;\n      
      }\n            break;\n        case AVCEnc_Encoding_PPS:\n\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL: AVCEnc_Encoding_PPS\");\n\n            /* initialized the structure */\n            BitstreamEncInit(bitstream, buffer, *buf_nal_size, NULL, 0);\n            BitstreamWriteBits(bitstream, 8, (1 << 5) | AVC_NALTYPE_PPS);\n\n            /* encode PPS */\n            status = EncodePPS(encvid, bitstream);\n            if (status != AVCENC_SUCCESS)\n            {\n                return status;\n            }\n\n            /* closing the NAL with trailing bits */\n            status = BitstreamTrailingBits(bitstream, buf_nal_size);\n            if (status == AVCENC_SUCCESS)\n            {\n                if (encvid->outOfBandParamSet == TRUE) // already extract PPS, SPS\n                {\n                \n                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL AVCEnc_Encoding_PPS -> enc_state = AVCEnc_Analyzing_Frame\");\n\n                    encvid->enc_state = AVCEnc_Analyzing_Frame;\n                }\n                else    // SetInput has been called before SPS and PPS.\n                {\n                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL AVCEnc_Encoding_PPS -> enc_state = AVCEnc_Encoding_Frame\");\n\n                    encvid->enc_state = AVCEnc_Encoding_Frame;\n                }\n\n                *nal_type = AVC_NALTYPE_PPS;\n                *buf_nal_size = bitstream->write_pos;\n            }\n            break;\n\n        case AVCEnc_Encoding_Frame:\n\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL: AVCEnc_Encoding_Frame\");\n\n            /* initialized the structure */\n            BitstreamEncInit(bitstream, buffer, *buf_nal_size, encvid->overrunBuffer, encvid->oBSize);\n            BitstreamWriteBits(bitstream, 8, (video->nal_ref_idc << 5) | (video->nal_unit_type));\n\n            /* Re-order the reference list 
according to the ref_pic_list_reordering() */\n            /* We don't have to reorder the list for the encoder here. This can only be done\n            after we encode this slice. We can run thru a second-pass to see if new ordering\n            would save more bits. Too much delay !! */\n            /* status = ReOrderList(video);*/\n            status = InitSlice(encvid);\n            if (status != AVCENC_SUCCESS)\n            {\n                return status;\n            }\n\n            /* when we have everything, we encode the slice header */\n            status = EncodeSliceHeader(encvid, bitstream);\n            if (status != AVCENC_SUCCESS)\n            {\n                return status;\n            }\n\n            status = AVCEncodeSlice(encvid);\n\n            video->slice_id++;\n\n            /* closing the NAL with trailing bits */\n            BitstreamTrailingBits(bitstream, buf_nal_size);\n\n            *buf_nal_size = bitstream->write_pos;\n\n            encvid->rateCtrl->numFrameBits += ((*buf_nal_size) << 3);\n\n            *nal_type = video->nal_unit_type;\n\n            if (status == AVCENC_PICTURE_READY)\n            {\n                status = RCUpdateFrame(encvid);\n                if (status == AVCENC_SKIPPED_PICTURE) /* skip current frame */\n                {\n                    DPBReleaseCurrentFrame(avcHandle, video);\n                    \n                    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL AVCEnc_Encoding_Frame+AVCENC_SKIPPED_PICTURE -> enc_state = AVCEnc_Analyzing_Frame\");\n\n                    encvid->enc_state = AVCEnc_Analyzing_Frame;\n\n                    return status;\n                }\n\n                /* perform loop-filtering on the entire frame */\n                DeblockPicture(video);\n\n                /* update the original frame array */\n                encvid->prevCodedFrameNum = encvid->currInput->coding_order;\n\n                /* store the encoded picture in the DPB buffer 
*/\n                StorePictureInDPB(avcHandle, video);\n\n                if (video->currPic->isReference)\n                {\n                    video->PrevRefFrameNum = video->sliceHdr->frame_num;\n                }\n\n                /* update POC related variables */\n                PostPOC(video);\n                \n                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL AVCEnc_Encoding_Frame -> enc_state = AVCEnc_Analyzing_Frame\");\n\n                encvid->enc_state = AVCEnc_Analyzing_Frame;\n                status = AVCENC_PICTURE_READY;\n\n            }\n            break;\n        default:\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"PVAVCEncodeNAL: status = WRONG STATE\");\n            \n            status = AVCENC_WRONG_STATE;\n    }\n\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncGetOverrunBuffer()                                   */\n/*  Purpose  : To retrieve the overrun buffer. Check whether overrun buffer */\n/*              is used or not before returning                             */\n/*  In/out   :                                                              */\n/*  Return   : Pointer to the internal overrun buffer.                      
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCEncBitstream *bitstream = encvid->bitstream;\n\n    if (bitstream->overrunBuffer == bitstream->bitstreamBuffer) /* OB is used */\n    {\n        return encvid->overrunBuffer;\n    }\n    else\n    {\n        return NULL;\n    }\n}\n\n\n/* ======================================================================== */\n/*  Function : PVAVCEncGetRecon()                                           */\n/*  Date     : 4/29/2004                                                    */\n/*  Purpose  : To retrieve the most recently encoded frame.                 */\n/*              assume that user will make a copy if they want to hold on   */\n/*              to it. Otherwise, it is not guaranteed to be reserved.      */\n/*              Most applications prefer to see original frame rather than  */\n/*              reconstructed frame. So, we stay away from complex          */\n/*              buffering mechanism. If needed, can be added later.         */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success.                                  
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCFrameStore *currFS = video->currFS;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    recon->YCbCr[0] = currFS->frame.Sl;\n    recon->YCbCr[1] = currFS->frame.Scb;\n    recon->YCbCr[2] = currFS->frame.Scr;\n    recon->height = currFS->frame.height;\n    recon->pitch = currFS->frame.pitch;\n    recon->disp_order = currFS->PicOrderCnt;\n    recon->coding_order = currFS->FrameNum;\n    recon->id = (uint32) currFS->base_dpb; /* use the pointer as the id */\n\n    currFS->IsOutputted |= 1;\n\n    return AVCENC_SUCCESS;\n}\n\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon)\n{\n    OSCL_UNUSED_ARG(avcHandle);\n    OSCL_UNUSED_ARG(recon);\n\n    return AVCENC_SUCCESS; //for now\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCCleanUpEncoder()                                        */\n/*  Date     : 4/18/2004                                                    */\n/*  Purpose  : To clean up memories allocated by PVAVCEncInitialize()       */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success.                                  
*/
/*  Modified :                                                              */
/* ======================================================================== */
/* Releases, in reverse dependency order, everything allocated by
   PVAVCEncInitialize(), then detaches the object from the handle.
   Safe to call on a handle whose initialization failed partway.
   NOTE(review): pointers are cast to (int) for CBAVC_Free — this
   truncates on LP64 platforms; presumably matches the callback's
   declared signature, confirm before porting to 64-bit. */
OSCL_EXPORT_REF void    PVAVCCleanUpEncoder(AVCHandle *avcHandle)
{
    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;
    AVCCommonObj *video;
    uint32 *userData = (uint32*) avcHandle->userData;

    if (encvid != NULL)
    {
        CleanMotionSearchModule(avcHandle);

        CleanupRateControlModule(avcHandle);

        if (encvid->functionPointer != NULL)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->functionPointer);
        }

        if (encvid->min_cost)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->min_cost);
        }

        if (encvid->intraSearch)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->intraSearch);
        }

        if (encvid->mot16x16)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->mot16x16);
        }

        if (encvid->rateCtrl)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->rateCtrl);
        }

        if (encvid->overrunBuffer)
        {
            avcHandle->CBAVC_Free(userData, (int)encvid->overrunBuffer);
        }

        video = encvid->common;
        if (video != NULL)
        {
            if (video->MbToSliceGroupMap)
            {
                avcHandle->CBAVC_Free(userData, (int)video->MbToSliceGroupMap);
            }
            if (video->mblock != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->mblock);
            }
            if (video->decPicBuf != NULL)
            {
                /* DPB frames are owned by the decPicBuf; release them first */
                CleanUpDPB(avcHandle, video);
                avcHandle->CBAVC_Free(userData, (int)video->decPicBuf);
            }
            if (video->sliceHdr != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->sliceHdr);
            }
            if (video->currPicParams != NULL)
            {
                if (video->currPicParams->slice_group_id)
                {
                    avcHandle->CBAVC_Free(userData, (int)video->currPicParams->slice_group_id);
                }

                avcHandle->CBAVC_Free(userData, (int)video->currPicParams);
            }
            if (video->currSeqParams != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video->currSeqParams);
            }
            /* NOTE(review): encvid->bitstream is freed inside the
               video != NULL branch even though it belongs to encvid;
               harmless today because bitstream is only allocated after
               video in PVAVCEncInitialize(), but the nesting (and the
               redundant inner video != NULL check below) is fragile. */
            if (encvid->bitstream != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)encvid->bitstream);
            }
            if (video != NULL)
            {
                avcHandle->CBAVC_Free(userData, (int)video);
            }
        }

        avcHandle->CBAVC_Free(userData, (int)encvid);

        /* detach so a subsequent PVAVCEncInitialize() can succeed */
        avcHandle->AVCObject = NULL;
    }

    return ;
}

/* ======================================================================== */
/*  Function : PVAVCEncUpdateBitRate()                                      */
/*  Date     : 2/20/2010                                                    */
/*  Purpose  : Update bitrate while encoding.                               */
/*  In/out   :                                                              */
/*  Return   : AVCENC_SUCCESS for success, else fail.                       
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCSeqParamSet *seqParam = video->currSeqParams;\n\n    int lev_idx;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    // only allow changing bit rate right after encoding a frame and before a new frame is analyzed.\n    if (encvid->enc_state != AVCEnc_Analyzing_Frame)\n    {\n        return AVCENC_WRONG_STATE;\n    }\n\n    if (bitrate && rateCtrl->cpbSize && (rateCtrl->rcEnable == TRUE))\n    {\n        // verify level constraint\n        // Note we keep the same cbpsize, hence the vbv delay will be affected.\n        lev_idx = mapLev2Idx[seqParam->level_idc];\n\n        if (bitrate > (uint32)(MaxBR[lev_idx]*1000))\n        {\n            return AVCENC_FAIL;\n        }\n\n        rateCtrl->bitRate = bitrate;\n\n        // update other rate control parameters\n        RCUpdateParams(rateCtrl, encvid);\n\n        return AVCENC_SUCCESS;\n    }\n    else\n    {\n        return AVCENC_FAIL;\n    }\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncUpdateFrameRate()                                    */\n/*  Date     : 2/20/2010                                                    */\n/*  Purpose  : Update frame rate while encoding.                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success, else fail.                       */\n/*  Limitation: Changing frame rate will affect the first IDR frame coming  */\n/*             after this call. 
It may come earlier or later than expected  */\n/*             but after this first IDR frame, the IDR period will be back  */\n/*             to normal.                                                   */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCSeqParamSet *seqParam = video->currSeqParams;\n\n    int mb_per_sec;\n    int lev_idx;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    // only allow changing frame rate right after encoding a frame and before a new frame is analyzed.\n    if (encvid->enc_state != AVCEnc_Analyzing_Frame)\n    {\n        return AVCENC_WRONG_STATE;\n    }\n\n    if (num && denom && (rateCtrl->rcEnable == TRUE))\n    {\n        mb_per_sec = ((video->PicSizeInMbs * num) + denom - 1) / denom;\n\n        // copy some code from VerifyLevel here\n        lev_idx = mapLev2Idx[seqParam->level_idc];\n\n        if (mb_per_sec > MaxMBPS[lev_idx])\n        {\n            return AVCENC_FAIL;\n        }\n\n        rateCtrl->frame_rate = (OsclFloat)num / denom;\n\n        // update other rate control parameters\n        RCUpdateParams(rateCtrl, encvid);\n\n        return AVCENC_SUCCESS;\n    }\n    else\n    {\n        return AVCENC_FAIL;\n    }\n\n    return AVCENC_FAIL;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVAVCEncUpdateIDRInterval()                                  */\n/*  Date     : 2/20/2010                                                    */\n/*  Purpose  : Update IDR interval while encoding.                          
*/\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success, else fail.                       */\n/*  Limitation: See PVAVCEncUpdateFrameRate.                                */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    if (IDRInterval > (int)video->MaxFrameNum)\n    {\n        return AVCENC_FAIL;\n    }\n\n    /* Note : IDRInterval defines periodicity of IDR frames after every nPFrames.*/\n    rateCtrl->idrPeriod = IDRInterval;\n\n    /* Note, when set to 1 (all I-frame), rate control is turned off */\n\n    return AVCENC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : PVAVCEncIDRRequest()                                         */\n/*  Date     : 2/20/2010                                                    */\n/*  Purpose  : Request next frame to be IDR.                                */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success, else fail.                       
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    // only allow changing frame rate right after encoding a frame and before a new frame is analyzed.\n    if (encvid->enc_state != AVCEnc_Analyzing_Frame)\n    {\n        return AVCENC_WRONG_STATE;\n    }\n\n    rateCtrl->first_frame = 1;\n\n    return AVCENC_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVAVCEncUpdateIMBRefresh()                                   */\n/*  Date     : 2/20/2010                                                    */\n/*  Purpose  : Update number of minimal I MBs per frame.                    */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS for success, else fail.                       
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB)\n{\n    AVCEncObject *encvid = (AVCEncObject*)avcHandle->AVCObject;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCCommonObj *video = encvid->common;\n\n    if (encvid == NULL)\n    {\n        return AVCENC_UNINITIALIZED;\n    }\n\n    if (numMB <= (int)video->PicSizeInMbs)\n    {\n        rateCtrl->intraMBRate = numMB;\n        return AVCENC_SUCCESS;\n    }\n\n    return AVCENC_FAIL;\n}\n\nvoid PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n\n    avcStats->avgFrameQP = GetAvgFrameQP(rateCtrl);\n    avcStats->numIntraMBs = encvid->numIntraMB;\n\n    return ;\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/avcenc_api.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains application function interfaces to the AVC encoder library\nand necessary type defitionitions and enumerations.\n@publishedAll\n*/\n\n#ifndef AVCENC_API_H_INCLUDED\n#define AVCENC_API_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#include \"oscl_types.h\"\n#endif\n\n#ifndef AVCAPI_COMMON_H_INCLUDED\n#include \"avcapi_common.h\"\n#endif\n\n/**\n This enumeration is used for the status returned from the library interface.\n*/\ntypedef enum\n{\n    /**\n    Fail information, need to add more error code for more specific info\n    */\n    AVCENC_TRAILINGONES_FAIL = -35,\n    AVCENC_SLICE_EMPTY = -34,\n    AVCENC_POC_FAIL = -33,\n    AVCENC_CONSECUTIVE_NONREF = -32,\n    AVCENC_CABAC_FAIL = -31,\n    AVCENC_PRED_WEIGHT_TAB_FAIL = -30,\n    AVCENC_DEC_REF_PIC_MARK_FAIL = -29,\n    AVCENC_SPS_FAIL = -28,\n    AVCENC_BITSTREAM_BUFFER_FULL    = -27,\n    AVCENC_BITSTREAM_INIT_FAIL = -26,\n    AVCENC_CHROMA_QP_FAIL = -25,\n    AVCENC_INIT_QS_FAIL = -24,\n    AVCENC_INIT_QP_FAIL = -23,\n    AVCENC_WEIGHTED_BIPRED_FAIL = -22,\n    AVCENC_INVALID_INTRA_PERIOD = -21,\n    AVCENC_INVALID_CHANGE_RATE = -20,\n    
AVCENC_INVALID_BETA_OFFSET = -19,\n    AVCENC_INVALID_ALPHA_OFFSET = -18,\n    AVCENC_INVALID_DEBLOCK_IDC = -17,\n    AVCENC_INVALID_REDUNDANT_PIC = -16,\n    AVCENC_INVALID_FRAMERATE = -15,\n    AVCENC_INVALID_NUM_SLICEGROUP = -14,\n    AVCENC_INVALID_POC_LSB = -13,\n    AVCENC_INVALID_NUM_REF = -12,\n    AVCENC_INVALID_FMO_TYPE = -11,\n    AVCENC_ENCPARAM_MEM_FAIL = -10,\n    AVCENC_LEVEL_NOT_SUPPORTED = -9,\n    AVCENC_LEVEL_FAIL = -8,\n    AVCENC_PROFILE_NOT_SUPPORTED = -7,\n    AVCENC_TOOLS_NOT_SUPPORTED = -6,\n    AVCENC_WRONG_STATE = -5,\n    AVCENC_UNINITIALIZED = -4,\n    AVCENC_ALREADY_INITIALIZED = -3,\n    AVCENC_NOT_SUPPORTED = -2,\n    AVCENC_MEMORY_FAIL = AVC_MEMORY_FAIL,\n    AVCENC_FAIL = AVC_FAIL,\n    /**\n    Generic success value\n    */\n    AVCENC_SUCCESS = AVC_SUCCESS,\n    AVCENC_PICTURE_READY = 2,\n    AVCENC_NEW_IDR = 3, /* upon getting this, users have to call PVAVCEncodeSPS and PVAVCEncodePPS to get a new SPS and PPS*/\n    AVCENC_SKIPPED_PICTURE = 4 /* continuable error message */\n\n} AVCEnc_Status;\n\n#define MAX_NUM_SLICE_GROUP  8      /* maximum for all the profiles */\n\n/**\nThis structure contains the encoding parameters.\n*/\ntypedef struct tagAVCEncParam\n{\n    /* if profile/level is set to zero, encoder will choose the closest one for you */\n    AVCProfile profile; /* profile of the bitstream to be compliant with*/\n    AVCLevel   level;   /* level of the bitstream to be compliant with*/\n\n    int width;      /* width of an input frame in pixel */\n    int height;     /* height of an input frame in pixel */\n\n    int poc_type; /* picture order count mode, 0,1 or 2 */\n    /* for poc_type == 0 */\n    uint log2_max_poc_lsb_minus_4; /* specify maximum value of POC Lsb, range 0..12*/\n    /* for poc_type == 1 */\n    uint delta_poc_zero_flag; /* delta POC always zero */\n    int offset_poc_non_ref; /* offset for non-reference pic */\n    int offset_top_bottom; /* offset between top and bottom field */\n    uint 
num_ref_in_cycle; /* number of reference frame in one cycle */\n    int *offset_poc_ref; /* array of offset for ref pic, dimension [num_ref_in_cycle] */\n\n    int num_ref_frame;  /* number of reference frame used */\n    int num_slice_group;  /* number of slice group */\n    int fmo_type;   /* 0: interleave, 1: dispersed, 2: foreground with left-over\n                    3: box-out, 4:raster scan, 5:wipe, 6:explicit */\n    /* for fmo_type == 0 */\n    uint run_length_minus1[MAX_NUM_SLICE_GROUP];   /* array of size num_slice_group, in round robin fasion */\n    /* fmo_type == 2*/\n    uint top_left[MAX_NUM_SLICE_GROUP-1];           /* array of co-ordinates of each slice_group */\n    uint bottom_right[MAX_NUM_SLICE_GROUP-1];       /* except the last one which is the background. */\n    /* fmo_type == 3,4,5 */\n    AVCFlag change_dir_flag;  /* slice group change direction flag */\n    uint change_rate_minus1;\n    /* fmo_type == 6 */\n    uint *slice_group; /* array of size MBWidth*MBHeight */\n\n    AVCFlag db_filter;  /* enable deblocking loop filter */\n    int disable_db_idc;  /* 0: filter everywhere, 1: no filter, 2: no filter across slice boundary */\n    int alpha_offset;   /* alpha offset range -6,...,6 */\n    int beta_offset;    /* beta offset range -6,...,6 */\n\n    AVCFlag constrained_intra_pred; /* constrained intra prediction flag */\n\n    AVCFlag auto_scd;   /* scene change detection on or off */\n    int idr_period; /* idr frame refresh rate in number of target encoded frame (no concept of actual time).*/\n    int intramb_refresh;    /* minimum number of intra MB per frame */\n    AVCFlag data_par;   /* enable data partitioning */\n\n    AVCFlag fullsearch; /* enable full-pel full-search mode */\n    int search_range;   /* search range for motion vector in (-search_range,+search_range) pixels */\n    AVCFlag sub_pel;    /* enable sub pel prediction */\n    AVCFlag submb_pred; /* enable sub MB partition mode */\n    AVCFlag rdopt_mode; /* RD 
optimal mode selection */\n    AVCFlag bidir_pred; /* enable bi-directional for B-slice, this flag forces the encoder to encode\n                        any frame with POC less than the previously encoded frame as a B-frame.\n                        If it's off, then such frames will remain P-frame. */\n\n    AVCFlag rate_control; /* rate control enable, on: RC on, off: constant QP */\n    int initQP;     /* initial QP */\n    uint32 bitrate;    /* target encoding bit rate in bits/second */\n    uint32 CPB_size;  /* coded picture buffer in number of bits */\n    uint32 init_CBP_removal_delay; /* initial CBP removal delay in msec */\n\n    uint32 frame_rate;  /* frame rate in the unit of frames per 1000 second */\n    /* note, frame rate is only needed by the rate control, AVC is timestamp agnostic. */\n\n    AVCFlag out_of_band_param_set; /* flag to set whether param sets are to be retrieved up front or not */\n\n    AVCFlag use_overrun_buffer;  /* do not throw away the frame if output buffer is not big enough.\n                                    copy excess bits to the overrun buffer */\n} AVCEncParams;\n\n\n/**\nThis structure contains current frame encoding statistics for debugging purpose.\n*/\ntypedef struct tagAVCEncFrameStats\n{\n    int avgFrameQP;   /* average frame QP */\n    int numIntraMBs;  /* number of intra MBs */\n    int numFalseAlarm;\n    int numMisDetected;\n    int numDetected;\n\n} AVCEncFrameStats;\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /** THE FOLLOWINGS ARE APIS */\n    /**\n    This function initializes the encoder library. It verifies the validity of the\n    encoding parameters against the specified profile/level and the list of supported\n    tools by this library. 
It allocates necessary memories required to perform encoding.\n    For re-encoding application, if users want to setup encoder in a more precise way,\n    users can give the external SPS and PPS to the encoder to follow.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"encParam\"   \"Pointer to the encoding parameter structure.\"\n    \\param \"extSPS\"     \"External SPS used for re-encoding purpose. NULL if not present\"\n    \\param \"extPPS\"     \"External PPS used for re-encoding purpose. NULL if not present\"\n    \\return \"AVCENC_SUCCESS for success,\n             AVCENC_NOT_SUPPORTED for the use of unsupported tools,\n             AVCENC_MEMORY_FAIL for memory allocation failure,\n             AVCENC_FAIL for generic failure.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncInitialize(AVCHandle *avcHandle, AVCEncParams *encParam, void* extSPS, void* extPPS);\n\n\n    /**\n    Since the output buffer size is not known prior to encoding a frame, users need to\n    allocate big enough buffer otherwise, that frame will be dropped. This function returns\n    the size of the output buffer to be allocated by the users that guarantees to hold one frame.\n    It follows the CPB spec for a particular level.  However, when the users set use_overrun_buffer\n    flag, this API is useless as excess output bits are saved in the overrun buffer waiting to be\n    copied out in small chunks, i.e. 
users can allocate any size of output buffer.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"size\"   \"Pointer to the size to be modified.\"\n    \\return \"AVCENC_SUCCESS for success, AVCENC_UNINITIALIZED when level is not known.\n    */\n\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetMaxOutputBufferSize(AVCHandle *avcHandle, int* size);\n\n    /**\n    Users call this function to provide an input structure to the encoder library which will keep\n    a list of input structures it receives in case the users call this function many time before\n    calling PVAVCEncodeSlice. The encoder library will encode them according to the frame_num order.\n    Users should not modify the content of a particular frame until this frame is encoded and\n    returned thru CBAVCEnc_ReturnInput() callback function.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"input\"      \"Pointer to the input structure.\"\n    \\return \"AVCENC_SUCCESS for success,\n            AVCENC_FAIL if the encoder is not in the right state to take a new input frame.\n            AVCENC_NEW_IDR for the detection or determination of a new IDR, with this status,\n            the returned NAL is an SPS NAL,\n            AVCENC_NO_PICTURE if the input frame coding timestamp is too early, users must\n            get next frame or adjust the coding timestamp.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncSetInput(AVCHandle *avcHandle, AVCFrameIO *input);\n\n    /**\n    This function is called to encode a NAL unit which can be an SPS NAL, a PPS NAL or\n    a VCL (video coding layer) NAL which contains one slice of data. It could be a\n    fixed number of macroblocks, as specified in the encoder parameters set, or the\n    maximum number of macroblocks fitted into the given input argument \"buffer\". 
The\n    input frame is taken from the oldest unencoded input frame retrieved by users by\n    PVAVCEncGetInput API.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"buffer\"     \"Pointer to the output AVC bitstream buffer, the format will be EBSP,\n                         not RBSP.\"\n    \\param \"buf_nal_size\"   \"As input, the size of the buffer in bytes.\n                        This is the physical limitation of the buffer. As output, the size of the EBSP.\"\n    \\param \"nal_type\"   \"Pointer to the NAL type of the returned buffer.\"\n    \\return \"AVCENC_SUCCESS for success of encoding one slice,\n             AVCENC_PICTURE_READY for the completion of a frame encoding,\n             AVCENC_FAIL for failure (this should not occur, though).\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncodeNAL(AVCHandle *avcHandle, uint8 *buffer, uint *buf_nal_size, int *nal_type);\n\n    /**\n    This function sniffs the nal_unit_type such that users can call corresponding APIs.\n    This function is identical to PVAVCDecGetNALType() in the decoder.\n    \\param \"bitstream\"  \"Pointer to the beginning of a NAL unit (start with forbidden_zero_bit, etc.).\"\n    \\param \"size\"       \"size of the bitstream (NumBytesInNALunit + 1).\"\n    \\param \"nal_unit_type\" \"Pointer to the return value of nal unit type.\"\n    \\return \"AVCENC_SUCCESS if success, AVCENC_FAIL otherwise.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetNALType(uint8 *bitstream, int size, int *nal_type, int *nal_ref_idc);\n\n    /**\n    This function gets the profile and level.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"profile\"    \"profile value\"\n    \\param \"level\"      \"level value\"\n    \\return \"AVCENC_SUCCESS if success, AVCENC_UNINITIALIZED if encoder obj is NULL.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetProfileLevel(AVCHandle* avcHandle, AVCProfile* profile, 
AVCLevel* level);\n\n    /**\n    This function returns the pointer to internal overrun buffer. Users can call this to query\n    whether the overrun buffer has been used to encode the current NAL.\n    \\param \"avcHandle\"  \"Pointer to the handle.\"\n    \\return \"Pointer to overrun buffer if it is used, otherwise, NULL.\"\n    */\n    OSCL_IMPORT_REF uint8* PVAVCEncGetOverrunBuffer(AVCHandle* avcHandle);\n\n    /**\n    This function returns the reconstructed frame of the most recently encoded frame.\n    Note that this frame is not returned to the users yet. Users should only read the\n    content of this frame.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"output\"     \"Pointer to the input structure.\"\n    \\return \"AVCENC_SUCCESS for success, AVCENC_NO_PICTURE if no picture to be outputted.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncGetRecon(AVCHandle *avcHandle, AVCFrameIO *recon);\n\n    /**\n    This function is used to return the reconstructed frame back to the AVC encoder library\n    in order to be re-used for encoding operation. If users want the content of it to remain\n    unchanged for a long time, they should make a copy of it and release the memory back to\n    the encoder. 
The encoder relies on the id element in the AVCFrameIO structure,\n    thus users should not change the id value.\n    \\param \"avcHandle\"  \"Handle to the AVC decoder library object.\"\n    \\param \"output\"      \"Pointer to the AVCFrameIO structure.\"\n    \\return \"AVCENC_SUCCESS for success, AVCENC_FAIL for fail for id not found.\"\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncReleaseRecon(AVCHandle *avcHandle, AVCFrameIO *recon);\n\n    /**\n    This function performs clean up operation including memory deallocation.\n    The encoder will also clear the list of input structures it has not released.\n    This implies that users must keep track of the number of input structure they have allocated\n    and free them accordingly.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    */\n    OSCL_IMPORT_REF void    PVAVCCleanUpEncoder(AVCHandle *avcHandle);\n\n    /**\n    This function extracts statistics of the current frame. If the encoder has not finished\n    with the current frame, the result is not accurate.\n    \\param \"avcHandle\"  \"Handle to the AVC encoder library object.\"\n    \\param \"avcStats\"   \"Pointer to AVCEncFrameStats structure.\"\n    \\return \"void.\"\n    */\n    void PVAVCEncGetFrameStats(AVCHandle *avcHandle, AVCEncFrameStats *avcStats);\n\n    /**\n    These functions are used for the modification of encoding parameters.\n    To be polished.\n    */\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateBitRate(AVCHandle *avcHandle, uint32 bitrate);\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateFrameRate(AVCHandle *avcHandle, uint32 num, uint32 denom);\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIDRInterval(AVCHandle *avcHandle, int IDRInterval);\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncIDRRequest(AVCHandle *avcHandle);\n    OSCL_IMPORT_REF AVCEnc_Status PVAVCEncUpdateIMBRefresh(AVCHandle *avcHandle, int numMB);\n\n\n#ifdef __cplusplus\n}\n#endif\n#endif  /* _AVCENC_API_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/avcenc_int.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains application function interfaces to the AVC encoder library\nand necessary type defitionitions and enumerations.\n@publishedAll\n*/\n\n#ifndef AVCENC_INT_H_INCLUDED\n#define AVCENC_INT_H_INCLUDED\n\n#ifndef AVCINT_COMMON_H_INCLUDED\n#include \"avcint_common.h\"\n#endif\n#ifndef AVCENC_API_H_INCLUDED\n#include \"avcenc_api.h\"\n#endif\n\n/* Definition for the structures below */\n#define DEFAULT_ATTR    0 /* default memory attribute */\n#define MAX_INPUT_FRAME 30 /* some arbitrary number, it can be much higher than this. 
*/\n#define MAX_REF_FRAME  16 /* max size of the RefPicList0 and RefPicList1 */\n#define MAX_REF_PIC_LIST 33\n\n#define MIN_QP          0\n#define MAX_QP          51\n#define SHIFT_QP        12\n#define  LAMBDA_ACCURACY_BITS         16\n#define  LAMBDA_FACTOR(lambda)        ((int)((double)(1<<LAMBDA_ACCURACY_BITS)*lambda+0.5))\n\n\n#define DISABLE_THRESHOLDING  0\n// for better R-D performance\n#define _LUMA_COEFF_COST_       4 //!< threshold for luma coeffs\n#define _CHROMA_COEFF_COST_     4 //!< threshold for chroma coeffs, used to be 7\n#define _LUMA_MB_COEFF_COST_    5 //!< threshold for luma coeffs of inter Macroblocks\n#define _LUMA_8x8_COEFF_COST_   5 //!< threshold for luma coeffs of 8x8 Inter Partition\n#define MAX_VALUE       999999   //!< used for start value for some variables\n\n#define  WEIGHTED_COST(factor,bits)   (((factor)*(bits))>>LAMBDA_ACCURACY_BITS)\n#define  MV_COST(f,s,cx,cy,px,py)     (WEIGHTED_COST(f,mvbits[((cx)<<(s))-px]+mvbits[((cy)<<(s))-py]))\n#define  MV_COST_S(f,cx,cy,px,py)     (WEIGHTED_COST(f,mvbits[cx-px]+mvbits[cy-py]))\n\n/* for sub-pel search and interpolation */\n#define SUBPEL_PRED_BLK_SIZE 576 // 24x24\n#define REF_CENTER 75\n#define V2Q_H0Q 1\n#define V0Q_H2Q 2\n#define V2Q_H2Q 3\n\n/*\n#define V3Q_H0Q 1\n#define V3Q_H1Q 2\n#define V0Q_H1Q 3\n#define V1Q_H1Q 4\n#define V1Q_H0Q 5\n#define V1Q_H3Q 6\n#define V0Q_H3Q 7\n#define V3Q_H3Q 8\n#define V2Q_H3Q 9\n#define V2Q_H0Q 10\n#define V2Q_H1Q 11\n#define V2Q_H2Q 12\n#define V3Q_H2Q 13\n#define V0Q_H2Q 14\n#define V1Q_H2Q 15\n*/\n\n\n#define DEFAULT_OVERRUN_BUFFER_SIZE 1000\n\n// associated with the above cost model\nconst uint8 COEFF_COST[2][16] =\n{\n    {3, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},\n    {9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}\n};\n\n\n\n//! 
convert from H.263 QP to H.264 quant given by: quant=pow(2,QP/6)\nconst int QP2QUANT[40] =\n{\n    1, 1, 1, 1, 2, 2, 2, 2,\n    3, 3, 3, 4, 4, 4, 5, 6,\n    6, 7, 8, 9, 10, 11, 13, 14,\n    16, 18, 20, 23, 25, 29, 32, 36,\n    40, 45, 51, 57, 64, 72, 81, 91\n};\n\n\n/**\nThis enumeration keeps track of the internal status of the encoder whether it is doing\nsomething. The encoding flow follows the order in which these states are.\n@publishedAll\n*/\ntypedef enum\n{\n    AVCEnc_Initializing = 0,\n    AVCEnc_Encoding_SPS,\n    AVCEnc_Encoding_PPS,\n    AVCEnc_Analyzing_Frame,\n    AVCEnc_WaitingForBuffer,  // pending state\n    AVCEnc_Encoding_Frame,\n} AVCEnc_State ;\n\n/**\nBitstream structure contains bitstream related parameters such as the pointer\nto the buffer, the current byte position and bit position. The content of the\nbitstreamBuffer will be in EBSP format as the emulation prevention codes are\nautomatically inserted as the RBSP is recorded.\n@publishedAll\n*/\ntypedef struct tagEncBitstream\n{\n    uint8 *bitstreamBuffer; /* pointer to buffer memory   */\n    int buf_size;       /* size of the buffer memory */\n    int write_pos;      /* next position to write to bitstreamBuffer  */\n    int count_zeros;   /* count number of consecutive zero */\n    uint current_word;  /* byte-swapped (MSB left) current word to write to buffer */\n    int bit_left;      /* number of bit left in current_word */\n    uint8   *overrunBuffer;  /* extra output buffer to prevent current skip due to output buffer overrun*/\n    int     oBSize;     /* size of allocated overrun buffer */\n    void   *encvid; /* pointer to the main object */\n\n} AVCEncBitstream;\n\n/**\nThis structure is used for rate control purpose and other performance related control\nvariables such as, RD cost, statistics, motion search stuffs, etc.\nshould be in this structure.\n@publishedAll\n*/\n\n\ntypedef struct tagRDInfo\n{\n    int QP;\n    int actual_bits;\n    OsclFloat mad;\n    OsclFloat R_D;\n} 
RDInfo;\n\ntypedef struct tagMultiPass\n{\n    /* multipass rate control data */\n    int target_bits;    /* target bits for current frame, = rc->T */\n    int actual_bits;    /* actual bits for current frame obtained after encoding, = rc->Rc*/\n    int QP;             /* quantization level for current frame, = rc->Qc*/\n    int prev_QP;        /* quantization level for previous frame */\n    int prev_prev_QP;   /* quantization level for previous frame before last*/\n    OsclFloat mad;          /* mad for current frame, = video->avgMAD*/\n    int bitrate;        /* bitrate for current frame */\n    OsclFloat framerate;    /* framerate for current frame*/\n\n    int nRe_Quantized;  /* control variable for multipass encoding, */\n    /* 0 : first pass */\n    /* 1 : intermediate pass(quantization and VLC loop only) */\n    /* 2 : final pass(de-quantization, idct, etc) */\n    /* 3 : macroblock level rate control */\n\n    int encoded_frames;     /* counter for all encoded frames */\n    int re_encoded_frames;  /* counter for all multipass encoded frames*/\n    int re_encoded_times;   /* counter for all times of multipass frame encoding */\n\n    /* Multiple frame prediction*/\n    RDInfo **pRDSamples;        /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/\n    int framePos;               /* specific position in previous multiple frames*/\n    int frameRange;             /* number of overall previous multiple frames */\n    int samplesPerFrame[30];    /* number of samples per frame, 30->30fps */\n\n    /* Bit allocation for scene change frames and high motion frames */\n    OsclFloat sum_mad;\n    int counter_BTsrc;  /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */\n    int counter_BTdst;  /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */\n    OsclFloat sum_QP;\n    int diff_counter;   /* diff_counter = 
-diff_counter_BTdst, or diff_counter_BTsrc */\n\n    /* For target bitrate or framerate update */\n    OsclFloat target_bits_per_frame;        /* = C = bitrate/framerate */\n    OsclFloat target_bits_per_frame_prev;   /* previous C */\n    OsclFloat aver_mad;                     /* so-far average mad could replace sum_mad */\n    OsclFloat aver_mad_prev;                /* previous average mad */\n    int   overlapped_win_size;          /* transition period of time */\n    int   encoded_frames_prev;          /* previous encoded_frames */\n} MultiPass;\n\n\ntypedef struct tagdataPointArray\n{\n    int Qp;\n    int Rp;\n    OsclFloat Mp;   /* for MB-based RC */\n    struct tagdataPointArray *next;\n    struct tagdataPointArray *prev;\n} dataPointArray;\n\ntypedef struct tagAVCRateControl\n{\n\n    /* these parameters are initialized by the users AVCEncParams */\n    /* bitrate-robustness tradeoff */\n    uint scdEnable; /* enable scene change detection */\n    int idrPeriod;  /* IDR period in number of frames */\n    int intraMBRate;   /* intra MB refresh rate per frame */\n    uint dpEnable;  /* enable data partitioning */\n\n    /* quality-complexity tradeoff */\n    uint subPelEnable;  /* enable quarter pel search */\n    int mvRange;    /* motion vector search range in +/- pixel */\n    uint subMBEnable;  /* enable sub MB prediction mode (4x4, 4x8, 8x4) */\n    uint rdOptEnable;  /* enable RD-opt mode selection */\n    uint twoPass; /* flag for 2 pass encoding ( for future )*/\n    uint bidirPred; /* bi-directional prediction for B-frame. */\n\n    uint rcEnable;  /* enable rate control, '1' on, '0' const QP */\n    int initQP; /* initial QP */\n\n    /* note the following 3 params are for HRD, these triplets can be a series\n    of triplets as the generalized HRD allows. SEI message must be generated in this case. */\n    /* We no longer have to differentiate between CBR and VBR. 
The users to the\n    AVC encoder lib will do the mapping from CBR/VBR to these parameters. */\n    int32 bitRate;  /* target bit rate for the overall clip in bits/second*/\n    int32 cpbSize;  /* coded picture buffer size in bytes */\n    int32 initDelayOffset; /* initial CBP removal delay in bits */\n\n    OsclFloat frame_rate; /* frame rate */\n    int srcInterval; /* source frame rate in msec */\n    int basicUnit;  /* number of macroblocks per BU */\n\n    /* Then internal parameters for the operation */\n    uint first_frame; /* a flag for the first frame */\n    int lambda_mf; /* for example */\n    int totalSAD;    /* SAD of current frame */\n\n    /*******************************************/\n    /* this part comes from MPEG4 rate control */\n    int alpha;  /* weight for I frame */\n    int Rs;     /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */\n    int Rc;     /*bits used for the current frame. It is the bit count obtained after encoding. */\n    int Rp;     /*bits to be removed from the buffer per picture. */\n    /*? is this the average one, or just the bits coded for the previous frame */\n    int Rps;    /*bit to be removed from buffer per src frame */\n    OsclFloat Ts;   /*number of seconds for the sequence  (or segment). e.g., 10 sec */\n    OsclFloat Ep;\n    OsclFloat Ec;   /*mean absolute difference for the current frame after motion compensation.*/\n    /*If the macroblock is intra coded, the original spatial pixel values are summed.*/\n    int Qc;     /*quantization level used for the current frame. */\n    int Nr;     /*number of P frames remaining for encoding.*/\n    int Rr; /*number of bits remaining for encoding this sequence (or segment).*/\n    int Rr_Old;\n    int T;      /*target bit to be used for the current frame.*/\n    int S;      /*number of bits used for encoding the previous frame.*/\n    int Hc; /*header and motion vector bits used in the current frame. 
It includes all the  information except to the residual information.*/\n    int Hp; /*header and motion vector bits used in the previous frame. It includes all the     information except to the residual information.*/\n    int Ql; /*quantization level used in the previous frame */\n    int Bs; /*buffer size e.g., R/2 */\n    int B;      /*current buffer level e.g., R/4 - start from the middle of the buffer */\n    OsclFloat X1;\n    OsclFloat X2;\n    OsclFloat X11;\n    OsclFloat M;            /*safe margin for the buffer */\n    OsclFloat smTick;    /*ratio of src versus enc frame rate */\n    double remnant;  /*remainder frame of src/enc frame for fine frame skipping */\n    int timeIncRes; /* vol->timeIncrementResolution */\n\n    dataPointArray   *end; /*quantization levels for the past (20) frames */\n\n    int     frameNumber; /* ranging from 0 to 20 nodes*/\n    int     w;\n    int     Nr_Original;\n    int     Nr_Old, Nr_Old2;\n    int     skip_next_frame;\n    int     Qdep;       /* smooth Q adjustment */\n    int     VBR_Enabled;\n\n    int totalFrameNumber; /* total coded frames, for debugging!!*/\n\n    char    oFirstTime;\n\n    int numFrameBits; /* keep track of number of bits of the current frame */\n    int NumberofHeaderBits;\n    int NumberofTextureBits;\n    int numMBHeaderBits;\n    int numMBTextureBits;\n    double *MADofMB;\n    int32 bitsPerFrame;\n\n    /* BX rate control, something like TMN8 rate control*/\n\n    MultiPass *pMP;\n\n    int     TMN_W;\n    int     TMN_TH;\n    int     VBV_fullness;\n    int     max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/\n    int     encoded_frames; /* counter for all encoded frames */\n    int     low_bound;              /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */\n    int     VBV_fullness_offset;    /* offset of VBV_fullness, usually is zero, but can be changed in 
H.263 mode*/\n    /* End BX */\n\n} AVCRateControl;\n\n\n/**\nThis structure is for the motion vector information. */\ntypedef struct tagMV\n{\n    int x;\n    int y;\n    uint sad;\n} AVCMV;\n\n/**\nThis structure contains function pointers for different platform dependent implementation of\nfunctions. */\ntypedef struct tagAVCEncFuncPtr\n{\n\n    int (*SAD_MB_HalfPel[4])(uint8*, uint8*, int, void *);\n    int (*SAD_Macroblock)(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n\n} AVCEncFuncPtr;\n\n/**\nThis structure contains information necessary for correct padding.\n*/\ntypedef struct tagPadInfo\n{\n    int i;\n    int width;\n    int j;\n    int height;\n} AVCPadInfo;\n\n\n#ifdef HTFM\ntypedef struct tagHTFM_Stat\n{\n    int abs_dif_mad_avg;\n    uint countbreak;\n    int offsetArray[16];\n    int offsetRef[16];\n} HTFM_Stat;\n#endif\n\n\n/**\nThis structure is the main object for AVC encoder library providing access to all\nglobal variables. It is allocated at PVAVCInitEncoder and freed at PVAVCCleanUpEncoder.\n@publishedAll\n*/\ntypedef struct tagEncObject\n{\n\n    AVCCommonObj *common;\n\n    AVCEncBitstream     *bitstream; /* for current NAL */\n    uint8   *overrunBuffer;  /* extra output buffer to prevent current skip due to output buffer overrun*/\n    int     oBSize;     /* size of allocated overrun buffer */\n\n    /* rate control */\n    AVCRateControl      *rateCtrl; /* pointer to the rate control structure */\n\n    /* encoding operation */\n    AVCEnc_State        enc_state; /* encoding state */\n\n    AVCFrameIO          *currInput; /* pointer to the current input frame */\n\n    int                 currSliceGroup; /* currently encoded slice group id */\n\n    int     level[24][16], run[24][16]; /* scratch memory */\n    int     leveldc[16], rundc[16]; /* for DC component */\n    int     levelcdc[16], runcdc[16]; /* for chroma DC component */\n    int     numcoefcdc[2]; /* number of coefficient for chroma DC */\n    int     numcoefdc;    
  /* number of coefficients for DC component */\n\n    int     qp_const;\n    int     qp_const_c;\n    /********* intra prediction scratch memory **********************/\n    uint8   pred_i16[AVCNumI16PredMode][256]; /* save prediction for MB */\n    uint8   pred_i4[AVCNumI4PredMode][16];  /* save prediction for blk */\n    uint8   pred_ic[AVCNumIChromaMode][128];  /* for 2 chroma */\n\n    int     mostProbableI4Mode[16]; /* in raster scan order */\n    /********* motion compensation related variables ****************/\n    AVCMV   *mot16x16;          /* Saved motion vectors for 16x16 block*/\n    AVCMV(*mot16x8)[2];     /* Saved motion vectors for 16x8 block*/\n    AVCMV(*mot8x16)[2];     /* Saved motion vectors for 8x16 block*/\n    AVCMV(*mot8x8)[4];      /* Saved motion vectors for 8x8 block*/\n\n    /********* subpel position **************************************/\n    uint32  subpel_pred[SUBPEL_PRED_BLK_SIZE/*<<2*/]; /* all 16 sub-pel positions  */\n    uint8   *hpel_cand[9];      /* pointer to half-pel position */\n    int     best_hpel_pos;          /* best position */\n    uint8   qpel_cand[8][24*16];        /* pointer to quarter-pel position */\n    int     best_qpel_pos;\n    uint8   *bilin_base[9][4];    /* pointer to 4 position at top left of bilinear quarter-pel */\n\n    /* need for intra refresh rate */\n    uint8   *intraSearch;       /* Intra Array for MBs to be intra searched */\n    uint    firstIntraRefreshMBIndx; /* keep track for intra refresh */\n\n    int     i4_sad;             /* temporary for i4 mode SAD */\n    int     *min_cost;          /* Minimum cost for the all MBs */\n    int     lambda_mode;        /* Lagrange parameter for mode selection */\n    int     lambda_motion;      /* Lagrange parameter for MV selection */\n\n    uint8   *mvbits_array;      /* Table for bits spent in the cost funciton */\n    uint8   *mvbits;            /* An offset to the above array. 
*/\n\n    /* to speedup the SAD calculation */\n    void *sad_extra_info;\n    uint8 currYMB[256];     /* interleaved current macroblock in HTFM order */\n\n#ifdef HTFM\n    int nrmlz_th[48];       /* Threshold for fast SAD calculation using HTFM */\n    HTFM_Stat htfm_stat;    /* For statistics collection */\n#endif\n\n    /* statistics */\n    int numIntraMB;         /* keep track of number of intra MB */\n\n    /* encoding complexity control */\n    uint fullsearch_enable; /* flag to enable full-pel full-search */\n\n    /* misc.*/\n    bool outOfBandParamSet; /* flag to enable out-of-band param set */\n\n    AVCSeqParamSet extSPS; /* for external SPS */\n    AVCPicParamSet extPPS; /* for external PPS */\n\n    /* time control */\n    uint32  prevFrameNum;   /* previous frame number starting from modTimeRef */\n    uint32  modTimeRef;     /* Reference modTime update every I-Vop*/\n    uint32  wrapModTime;    /* Offset to modTime Ref, rarely used */\n\n    uint    prevProcFrameNum;  /* previously processed frame number, could be skipped */\n    uint    prevCodedFrameNum;  /* previously encoded frame number */\n    /* POC related variables */\n    uint32  dispOrdPOCRef;      /* reference POC is displayer order unit. */\n\n    /* Function pointers */\n    AVCEncFuncPtr *functionPointer; /* store pointers to platform specific functions */\n\n    /* Application control data */\n    AVCHandle *avcHandle;\n\n\n} AVCEncObject;\n\n\n#endif /*AVCENC_INT_H_INCLUDED*/\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/avcenc_lib.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\nThis file contains declarations of internal functions for AVC encoder library.\n@publishedAll\n*/\n#ifndef AVCENC_LIB_H_INCLUDED\n#define AVCENC_LIB_H_INCLUDED\n\n#ifndef AVCLIB_COMMON_H_INCLUDED\n#include \"avclib_common.h\"\n#endif\n#ifndef AVCENC_INT_H_INCLUDED\n#include \"avcenc_int.h\"\n#endif\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /*------------- block.c -------------------------*/\n\n    /**\n    This function perform residue calculation, transform, quantize, inverse quantize,\n    inverse transform and residue compensation on a 4x4 block.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"blkidx\"  \"raster scan block index of the current 4x4 block.\"\n    \\param \"cur\"    \"Pointer to the reconstructed block.\"\n    \\param \"org\"    \"Pointer to the original block.\"\n    \\param \"coef_cost\"  \"Pointer to the coefficient cost to be filled in and returned.\"\n    \\return \"Number of non-zero coefficients.\"\n    */\n    int dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost);\n\n    /**\n    This function performs IDCT on an INTER macroblock.\n    \\param \"video\"  \"Pointer to AVCCommonObj.\"\n    \\param \"curL\"   
\"Pointer to the origin of the macroblock on the current frame.\"\n    \\param \"currMB\" \"Pointer to the AVCMacroblock structure.\"\n    \\param \"picPitch\" \"Pitch of the current frame.\"\n    \\return \"void\".\n    */\n    void MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch);\n\n    /**\n    This function perform residue calculation, transform, quantize, inverse quantize,\n    inverse transform and residue compensation on a macroblock.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"curL\"   \"Pointer to the reconstructed MB.\"\n    \\param \"orgL\"    \"Pointer to the original MB.\"\n    \\return \"void\"\n    */\n    void dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL);\n\n    /**\n    This function perform residue calculation, transform, quantize, inverse quantize,\n    inverse transform and residue compensation for chroma components of an MB.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"curC\"   \"Pointer to the reconstructed MB.\"\n    \\param \"orgC\"    \"Pointer to the original MB.\"\n    \\param \"cr\"     \"Flag whether it is Cr or not.\"\n    \\return \"void\"\n    */\n    void dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr);\n\n    /*----------- init.c ------------------*/\n    /**\n    This function interprets the encoding parameters provided by users in encParam.\n    The results are kept in AVCEncObject, AVCSeqParamSet, AVCPicParamSet and AVCSliceHeader.\n    \\param \"encvid\"     \"Pointer to AVCEncObject.\"\n    \\param \"encParam\"   \"Pointer to AVCEncParam.\"\n    \\param \"extSPS\"     \"External SPS template to be followed. NULL if not present.\"\n    \\param \"extPPS\"     \"External PPS template to be followed. 
NULL if not present.\"\n    \\return \"see AVCEnc_Status.\"\n    */\n    AVCEnc_Status  SetEncodeParam(AVCHandle *avcHandle, AVCEncParams *encParam,\n                                  void *extSPS, void *extPPS);\n\n    /**\n    This function verifies the encoding parameters whether they meet the set of supported\n    tool by a specific profile. If the profile is not set, it will just find the closest\n    profile instead of verifying it.\n    \\param \"video\"  \"Pointer to AVCEncObject.\"\n    \\param \"seqParam\"   \"Pointer to AVCSeqParamSet.\"\n    \\param \"picParam\"   \"Pointer to AVCPicParamSet.\"\n    \\return \"AVCENC_SUCCESS if success,\n            AVCENC_PROFILE_NOT_SUPPORTED if the specified profile\n                is not supported by this version of the library,\n            AVCENC_TOOLS_NOT_SUPPORTED if any of the specified encoding tools are\n            not supported by the user-selected profile.\"\n    */\n    AVCEnc_Status VerifyProfile(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);\n\n    /**\n    This function verifies the encoding parameters whether they meet the requirement\n    for a specific level. If the level is not set, it will just find the closest\n    level instead of verifying it.\n    \\param \"video\"  \"Pointer to AVCEncObject.\"\n    \\param \"seqParam\"   \"Pointer to AVCSeqParamSet.\"\n    \\param \"picParam\"   \"Pointer to AVCPicParamSet.\"\n    \\return \"AVCENC_SUCCESS if success,\n            AVCENC_LEVEL_NOT_SUPPORTED if the specified level\n                is not supported by this version of the library,\n            AVCENC_LEVEL_FAIL if any of the encoding parameters exceed\n            the range of the user-selected level.\"\n    */\n    AVCEnc_Status VerifyLevel(AVCEncObject *video, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam);\n\n    /**\n    This function initializes the frame encoding by setting poc/frame_num related parameters. 
it\n    also performs motion estimation.\n    \\param \"encvid\" \"Pointer to the AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS if success, AVCENC_NO_PICTURE if there is no input picture\n            in the queue to encode, AVCENC_POC_FAIL or AVCENC_CONSECUTIVE_NONREF for POC\n            related errors, AVCENC_NEW_IDR if new IDR is detected.\"\n    */\n    AVCEnc_Status InitFrame(AVCEncObject *encvid);\n\n    /**\n    This function initializes slice header related variables and other variables necessary\n    for decoding one slice.\n    \\param \"encvid\" \"Pointer to the AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS if success.\"\n    */\n    AVCEnc_Status InitSlice(AVCEncObject *encvid);\n\n    /*----------- header.c ----------------*/\n    /**\n    This function performs bitstream encoding of the sequence parameter set NAL.\n    \\param \"encvid\" \"Pointer to the AVCEncObject.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_SPS_FAIL or others for unexpected failure which\n    should not occur. 
The SPS parameters should all be verified before this function is called.\"\n    */\n    AVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream);\n\n    /**\n    This function encodes the VUI parameters into the sequence parameter set bitstream.\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"vui\"    \"Pointer to AVCVUIParams.\"\n    \\return \"nothing.\"\n    */\n    void EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui);\n\n    /**\n    This function encodes HRD parameters into the sequence parameter set bitstream\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"hrd\"    \"Pointer to AVCHRDParams.\"\n    \\return \"nothing.\"\n    */\n    void EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd);\n\n\n    /**\n    This function performs bitstream encoding of the picture parameter set NAL.\n    \\param \"encvid\" \"Pointer to the AVCEncObject.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_PPS_FAIL or others for unexpected failure which\n    should not occur. 
The SPS parameters should all be verified before this function is called.\"\n    */\n    AVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream);\n\n    /**\n    This function encodes slice header information which has been initialized or fabricated\n    prior to entering this function.\n    \\param \"encvid\" \"Pointer to the AVCEncObject.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\return \"AVCENC_SUCCESS if success or bitstream fail statuses.\"\n    */\n    AVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream);\n\n    /**\n    This function encodes reference picture list reordering related syntax.\n    \\param \"video\" \"Pointer to AVCCommonObj.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"sliceHdr\" \"Pointer to AVCSliceHdr.\"\n    \\param \"slice_type\" \"Value of slice_type - 5 if greater than 5.\"\n    \\return \"AVCENC_SUCCESS for success and AVCENC_FAIL otherwise.\"\n    */\n    AVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type);\n\n    /**\n    This function encodes dec_ref_pic_marking related syntax.\n    \\param \"video\" \"Pointer to AVCCommonObj.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"sliceHdr\" \"Pointer to AVCSliceHdr.\"\n    \\return \"AVCENC_SUCCESS for success and AVCENC_FAIL otherwise.\"\n    */\n    AVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr);\n\n    /**\n    This function initializes the POC related variables and the POC syntax to be encoded\n    to the slice header derived from the disp_order and is_reference flag of the original\n    input frame to be encoded.\n    \\param \"video\"  \"Pointer to the AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS if success,\n            AVCENC_POC_FAIL if the poc type is undefined or\n            AVCENC_CONSECUTIVE_NONREF if there are consecutive 
non-reference frame for POC type 2.\"\n    */\n    AVCEnc_Status InitPOC(AVCEncObject *video);\n\n    /**\n    This function performs POC related operation after a picture is decoded.\n    \\param \"video\" \"Pointer to AVCCommonObj.\"\n    \\return \"AVCENC_SUCCESS\"\n    */\n    AVCEnc_Status PostPOC(AVCCommonObj *video);\n\n    /*----------- bitstream_io.c ----------------*/\n    /**\n    This function initializes the bitstream structure with the information given by\n    the users.\n    \\param \"bitstream\"  \"Pointer to the AVCEncBitstream structure.\"\n    \\param \"buffer\"     \"Pointer to the unsigned char buffer for output.\"\n    \\param \"buf_size\"   \"The size of the buffer in bytes.\"\n    \\param \"overrunBuffer\"  \"Pointer to extra overrun buffer.\"\n    \\param \"oBSize\"     \"Size of overrun buffer in bytes.\"\n    \\return \"AVCENC_SUCCESS if success, AVCENC_BITSTREAM_INIT_FAIL if fail\"\n    */\n    AVCEnc_Status BitstreamEncInit(AVCEncBitstream *bitstream, uint8 *buffer, int buf_size,\n                                   uint8 *overrunBuffer, int oBSize);\n\n    /**\n    This function writes the data from the cache into the bitstream buffer. 
It also adds the\n    emulation prevention code if necessary.\n    \\param \"stream\"     \"Pointer to the AVCEncBitstream structure.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail.\"\n    */\n    AVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream);\n\n    /**\n    This function writes the codeword into the cache which will eventually be written to\n    the bitstream buffer.\n    \\param \"stream\"     \"Pointer to the AVCEncBitstream structure.\"\n    \\param \"nBits\"      \"Number of bits in the codeword.\"\n    \\param \"code\"       \"The codeword.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail.\"\n    */\n    AVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code);\n\n    /**\n    This function writes one bit of data into the cache which will eventually be written\n    to the bitstream buffer.\n    \\param \"stream\"     \"Pointer to the AVCEncBitstream structure.\"\n    \\param \"code\"       \"The codeword.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail.\"\n    */\n    AVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code);\n\n    /**\n    This function adds trailing bits to the bitstream and reports back the final EBSP size.\n    \\param \"stream\"     \"Pointer to the AVCEncBitstream structure.\"\n    \\param \"nal_size\"   \"Output the final NAL size.\"\n    \\return \"AVCENC_SUCCESS if success or AVCENC_BITSTREAM_BUFFER_FULL if fail.\"\n    */\n    AVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size);\n\n    /**\n    This function checks whether the current bit position is byte-aligned or not.\n    \\param \"stream\" \"Pointer to the bitstream structure.\"\n    \\return \"true if byte-aligned, false otherwise.\"\n    */\n    bool byte_aligned(AVCEncBitstream *stream);\n\n\n    /**\n    This function checks the availability of overrun buffer and switches to use it when\n    
normal buffer is not big enough.\n    \\param \"stream\" \"Pointer to the bitstream structure.\"\n    \\param \"numExtraBytes\" \"Number of extra byte needed.\"\n    \\return \"AVCENC_SUCCESS or AVCENC_FAIL.\"\n    */\n    AVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes);\n\n\n    /*-------------- intra_est.c ---------------*/\n\n    /** This function performs intra/inter decision based on ABE.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"min_cost\"   \"Best inter cost.\"\n    \\param \"curL\"   \"Pointer to the current MB origin in reconstructed frame.\"\n    \\param \"picPitch\" \"Pitch of the reconstructed frame.\"\n    \\return \"Boolean for intra mode.\"\n    */\n\n//bool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch);\n    bool IntraDecision(int *min_cost, uint8 *cur, int pitch, bool ave);\n\n    /**\n    This function performs intra prediction mode search.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"mbnum\"  \"Current MB number.\"\n    \\param \"curL\"   \"Pointer to the current MB origin in reconstructed frame.\"\n    \\param \"picPitch\" \"Pitch of the reconstructed frame.\"\n    \\return \"void.\"\n    */\n    void MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch);\n\n    /**\n    This function generates all the I16 prediction modes for an MB and keep it in\n    encvid->pred_i16.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\return \"void\"\n    */\n    void intrapred_luma_16x16(AVCEncObject *encvid);\n\n    /**\n    This function calculate the cost of all I16 modes and compare them to get the minimum.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"orgY\"   \"Pointer to the original luma MB.\"\n    \\param \"min_cost\" \"Pointer to the minimal cost so-far.\"\n    \\return \"void\"\n    */\n    void find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost);\n\n    /**\n    
This function calculates the cost of each I16 mode.\n    \\param \"org\"    \"Pointer to the original luma MB.\"\n    \\param \"org_pitch\" \"Stride size of the original frame.\"\n    \\param \"pred\"   \"Pointer to the prediction values.\"\n    \\param \"min_cost\" \"Minimal cost so-far.\"\n    \\return \"Cost\"\n    */\n\n    int cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost);\n\n    /**\n    This function generates all the I4 prediction modes and select the best one\n    for all the blocks inside a macroblock.It also calls dct_luma to generate the reconstructed\n    MB, and transform coefficients to be encoded.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"min_cost\" \"Pointer to the minimal cost so-far.\"\n    \\return \"void\"\n    */\n    void mb_intra4x4_search(AVCEncObject *encvid, int *min_cost);\n\n    /**\n    This function calculates the most probable I4 mode of a given 4x4 block\n    from neighboring informationaccording to AVC/H.264 standard.\n    \\param \"video\"  \"Pointer to AVCCommonObj.\"\n    \\param \"blkidx\" \"The current block index.\"\n    \\return \"Most probable mode.\"\n    */\n    int FindMostProbableI4Mode(AVCCommonObj *video, int blkidx);\n\n    /**\n    This function is where a lot of actions take place in the 4x4 block level inside\n    mb_intra4x4_search.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\param \"blkidx\" \"The current 4x4 block index.\"\n    \\param \"cur\"    \"Pointer to the reconstructed block.\"\n    \\param \"org\"    \"Pointer to the original block.\"\n    \\return \"Minimal cost, also set currMB->i4Mode\"\n    */\n    int blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org);\n\n    /**\n    This function calculates the cost of a given I4 prediction mode.\n    \\param \"org\"    \"Pointer to the original block.\"\n    \\param \"org_pitch\"  \"Stride size of the original frame.\"\n    \\param \"pred\"   \"Pointer to the prediction block. 
(encvid->pred_i4)\"\n    \\param \"cost\"   \"Pointer to the minimal cost (to be updated).\"\n    \\return \"void\"\n    */\n    void cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost);\n\n    /**\n    This function performs chroma intra search. Each mode is saved in encvid->pred_ic.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\return \"void\"\n    */\n    void chroma_intra_search(AVCEncObject *encvid);\n\n    /**\n    This function calculates the cost of a chroma prediction mode.\n    \\param \"orgCb\"  \"Pointer to the original Cb block.\"\n    \\param \"orgCr\"  \"Pointer to the original Cr block.\"\n    \\param \"org_pitch\"  \"Stride size of the original frame.\"\n    \\param \"pred\"   \"Pointer to the prediction block (encvid->pred_ic)\"\n    \\param \"mincost\"    \"Minimal cost so far.\"\n    \\return \"Cost.\"\n    */\n\n    int SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int mincost);\n\n    /*-------------- motion_comp.c ---------------*/\n\n    /**\n    This is a main function to peform inter prediction.\n    \\param \"encvid\"     \"Pointer to AVCEncObject.\"\n    \\param \"video\"      \"Pointer to AVCCommonObj.\"\n    \\return \"void\".\n    */\n    void AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video);\n\n\n    /**\n    This function is called for luma motion compensation.\n    \\param \"ref\"    \"Pointer to the origin of a reference luma.\"\n    \\param \"picwidth\"   \"Width of the picture.\"\n    \\param \"picheight\"  \"Height of the picture.\"\n    \\param \"x_pos\"  \"X-coordinate of the predicted block in quarter pel resolution.\"\n    \\param \"y_pos\"  \"Y-coordinate of the predicted block in quarter pel resolution.\"\n    \\param \"pred\"   \"Pointer to the output predicted block.\"\n    \\param \"pred_pitch\" \"Width of pred.\"\n    \\param \"blkwidth\"   \"Width of the current partition.\"\n    \\param \"blkheight\"  \"Height of the current partition.\"\n    \\return \"void\"\n 
   */\n    void eLumaMotionComp(uint8 *ref, int picwidth, int picheight,\n                         int x_pos, int y_pos,\n                         uint8 *pred, int pred_pitch,\n                         int blkwidth, int blkheight);\n\n    void eFullPelMC(uint8 *in, int inwidth, uint8 *out, int outpitch,\n                    int blkwidth, int blkheight);\n\n    void eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                        int blkwidth, int blkheight, int dx);\n\n    void eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,\n                        int blkwidth, int blkheight, int dx);\n\n    void eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,\n                        int blkwidth, int blkheight);\n\n    void eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                        int blkwidth, int blkheight, int dy);\n\n    void eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,\n                        int blkwidth, int blkheight);\n\n    void eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,\n                        int blkwidth, int blkheight, int dy);\n\n    void eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,\n                           uint8 *out, int outpitch,\n                           int blkwidth, int blkheight);\n\n    void eChromaMotionComp(uint8 *ref, int picwidth, int picheight,\n                           int x_pos, int y_pos, uint8 *pred, int pred_pitch,\n                           int blkwidth, int blkheight);\n\n    void eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                                uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                                  uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                    
            uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                                 uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                                   uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n    void eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                                 uint8 *pOut, int predPitch, int blkwidth, int blkheight);\n\n\n    /*-------------- motion_est.c ---------------*/\n\n    /**\n    Allocate and initialize arrays necessary for motion search algorithm.\n    \\param \"envid\" \"Pointer to AVCEncObject.\"\n    \\return \"AVC_SUCCESS or AVC_MEMORY_FAIL.\"\n    */\n    AVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle);\n\n    /**\n    Clean up memory allocated in InitMotionSearchModule.\n    \\param \"envid\" \"Pointer to AVCEncObject.\"\n    \\return \"void.\"\n    */\n    void CleanMotionSearchModule(AVCHandle *avcHandle);\n\n\n    /**\n    This function performs motion estimation of all macroblocks in a frame during the InitFrame.\n    The goal is to find the best MB partition for inter and find out if intra search is needed for\n    any MBs. 
This intra MB tendency can be used for scene change detection.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\return \"void\"\n    */\n    void AVCMotionEstimation(AVCEncObject *encvid);\n\n    /**\n    This function performs repetitive edge padding to the reference picture by adding 16 pixels\n    around the luma and 8 pixels around the chromas.\n    \\param \"refPic\" \"Pointer to the reference picture.\"\n    \\return \"void\"\n    */\n    void  AVCPaddingEdge(AVCPictureData *refPic);\n\n    /**\n    This function keeps track of intra refresh macroblock locations.\n    \\param \"encvid\" \"Pointer to the global array structure AVCEncObject.\"\n    \\param \"mblock\" \"Pointer to the array of AVCMacroblock structures.\"\n    \\param \"totalMB\" \"Total number of MBs in a frame.\"\n    \\param \"numRefresh\" \"Number of MB to be intra refresh in a single frame.\"\n    \\return \"void\"\n    */\n    void AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh);\n\n#ifdef HTFM\n    void InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect);\n    void UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);\n    void CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[]);\n    void    HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch);\n#endif\n\n    /**\n    This function reads the input MB into a smaller faster memory space to minimize the cache miss.\n    \\param \"encvid\" \"Pointer to the global AVCEncObject.\"\n    \\param \"cur\"    \"Pointer to the original input macroblock.\"\n    \\param \"pitch\"  \"Stride size of the input frame (luma).\"\n    \\return \"void\"\n    */\n    void    AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch);\n\n    /**\n    Performs motion vector search for a macroblock.\n    \\param \"encvid\" \"Pointer to AVCEncObject structure.\"\n    \\param \"cur\"    \"Pointer 
to the current macroblock in the input frame.\"\n    \\param \"best_cand\" \"Array of best candidates (to be filled in and returned).\"\n    \\param \"i0\"     \"X-coordinate of the macroblock.\"\n    \\param \"j0\"     \"Y-coordinate of the macroblock.\"\n    \\param \"type_pred\" \"Indicates the type of operations.\"\n    \\param \"FS_en\"      \"Flag for fullsearch enable.\"\n    \\param \"hp_guess\"   \"Guess for half-pel search.\"\n    \\return \"void\"\n    */\n    void AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],\n                           int i0, int j0, int type_pred, int FS_en, int *hp_guess);\n\n//AVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,\n//                           int num_pass);\n\n    /**\n    Perform full-pel exhaustive search around the predicted MV.\n    \\param \"encvid\" \"Pointer to AVCEncObject structure.\"\n    \\param \"prev\"   \"Pointer to the reference frame.\"\n    \\param \"cur\"    \"Pointer to the input macroblock.\"\n    \\param \"imin\"   \"Pointer to minimal mv (x).\"\n    \\param \"jmin\"   \"Pointer to minimal mv (y).\"\n    \\param \"ilow, ihigh, jlow, jhigh\"   \"Lower bound on search range.\"\n    \\param \"cmvx, cmvy\" \"Predicted MV value.\"\n\n    \\return \"The cost function of the best candidate.\"\n    */\n    int AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,\n                      int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,\n                      int cmvx, int cmvy);\n\n    /**\n    Select candidates from neighboring blocks according to the type of the\n    prediction selection.\n    \\param \"mvx\"    \"Pointer to the candidate, x-coordinate.\"\n    \\param \"mvy\"    \"Pointer to the candidate, y-coordinate.\"\n    \\param \"num_can\"    \"Pointer to the number of candidates returned.\"\n    \\param \"imb\"    \"The MB index x-coordinate.\"\n    \\param \"jmb\"    \"The MB index y-coordinate.\"\n    \\param 
\"type_pred\"  \"Type of the prediction.\"\n    \\param \"cmvx, cmvy\" \"Pointer to predicted MV (modified version).\"\n    \\return \"void.\"\n    */\n    void AVCCandidateSelection(int *mvx, int *mvy, int *num_can, int imb, int jmb,\n                               AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy);\n\n    /**\n    Utility function to move the values in the array dn according to the new\n    location to avoid redundant calculation.\n    \\param \"dn\" \"Array of integer of size 9.\"\n    \\param \"new_loc\"    \"New location index.\"\n    \\return \"void.\"\n    */\n    void AVCMoveNeighborSAD(int dn[], int new_loc);\n\n    /**\n    Find minimum index of dn.\n    \\param \"dn\" \"Array of integer of size 9.\"\n    \\return \"The index of dn with the smallest dn[] value.\"\n    */\n    int AVCFindMin(int dn[]);\n\n    /*------------- findhalfpel.c -------------------*/\n\n    /**\n    Search for the best half-pel resolution MV around the full-pel MV.\n    \\param \"encvid\" \"Pointer to the global AVCEncObject structure.\"\n    \\param \"cur\"    \"Pointer to the current macroblock.\"\n    \\param \"mot\"    \"Pointer to the AVCMV array of the frame.\"\n    \\param \"ncand\"  \"Pointer to the origin of the fullsearch result.\"\n    \\param \"xpos\"   \"The current MB position in x.\"\n    \\param \"ypos\"   \"The current MB position in y.\"\n    \\param \"hp_guess\"   \"Input to help speedup the search.\"\n    \\param \"cmvx, cmvy\" \"Predicted motion vector use for mvcost.\"\n    \\return \"Minimal cost (SATD) without MV cost. 
(for rate control purpose)\"\n    */\n    int AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,\n                         int xpos, int ypos, int hp_guess, int cmvx, int cmvy);\n\n    /**\n    This function generates sub-pel pixels required to do subpel MV search.\n    \\param \"subpel_pred\" \"Pointer to 2-D array, each array for each position.\"\n    \\param \"ncand\" \"Pointer to the full-pel center position in ref frame.\"\n    \\param \"lx\" \"Pitch of the ref frame.\"\n    \\return \"void\"\n     */\n    void GenerateHalfPelPred(uint8 *subpel_pred, uint8 *ncand, int lx);\n\n    /**\n    This function calculate vertical interpolation at half-point of size 4x17.\n    \\param \"dst\" \"Pointer to destination.\"\n    \\param \"ref\" \"Pointer to the starting reference pixel.\"\n    \\return \"void.\"\n    */\n    void VertInterpWClip(uint8 *dst, uint8 *ref);\n\n    /**\n    This function generates quarter-pel pixels around the best half-pel result\n    during the sub-pel MV search.\n    \\param \"bilin_base\"  \"Array of pointers to be used as basis for q-pel interp.\"\n    \\param \"qpel_pred\"  \"Array of pointers pointing to quarter-pel candidates.\"\n    \\param \"hpel_pos\" \"Best half-pel position at the center.\"\n    \\return \"void\"\n    */\n    void GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_pred, int hpel_pos);\n\n    /**\n    This function calculates the SATD of a subpel candidate.\n    \\param \"cand\"   \"Pointer to a candidate.\"\n    \\param \"cur\"    \"Pointer to the current block.\"\n    \\param \"dmin\"   \"Min-so-far SATD.\"\n    \\return \"Sum of Absolute Transformed Difference.\"\n    */\n    int SATD_MB(uint8 *cand, uint8 *cur, int dmin);\n\n    /*------------- rate_control.c -------------------*/\n\n    /** This function is a utility function. 
It returns average QP of the previously encoded frame.\n    \\param \"rateCtrl\" \"Pointer to AVCRateControl structure.\"\n    \\return \"Average QP.\"\n    */\n    int GetAvgFrameQP(AVCRateControl *rateCtrl);\n\n    /**\n    This function takes the timestamp of the input and determine whether it should be encoded\n    or skipped.\n    \\param \"encvid\" \"Pointer to the AVCEncObject structure.\"\n    \\param \"rateCtrl\"   \"Pointer to the AVCRateControl structure.\"\n    \\param \"modTime\"    \"The 32 bit timestamp of the input frame.\"\n    \\param \"frameNum\"   \"Pointer to the frame number if to be encoded.\"\n    \\return \"AVC_SUCCESS or else.\"\n    */\n    AVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum);\n\n    /**\n    This function updates the buffer fullness when frames are dropped either by the\n    rate control algorithm or by the users to make sure that target bit rate is still met.\n    \\param \"video\" \"Pointer to the common object structure.\"\n    \\param \"rateCtrl\" \"Pointer to rate control structure.\"\n    \\param \"frameInc\" \"Difference of the current frame number and previous frame number.\"\n    \\return \"void.\"\n    */\n    void RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc);\n\n    /**\n    This function initializes rate control module and allocates necessary bufferes to do the job.\n    \\param \"avcHandle\" \"Pointer to the encoder handle.\"\n    \\return \"AVCENC_SUCCESS or AVCENC_MEMORY_FAIL.\"\n    */\n    AVCEnc_Status InitRateControlModule(AVCHandle *avcHandle);\n\n    /**\n    This function frees buffers allocated in InitRateControlModule.\n    \\param \"avcHandle\" \"Pointer to the encoder handle.\"\n    \\return \"void.\"\n    */\n    void CleanupRateControlModule(AVCHandle *avcHandle);\n\n    /**\n    This function is called at the beginning of each GOP or the first IDR frame. 
It calculates\n    target bits for a GOP.\n    \\param \"encvid\" \"Pointer to the encoder object.\"\n    \\return \"void.\"\n    */\n    void RCInitGOP(AVCEncObject *encvid);\n\n    /**\n    This function calculates target bits for a particular frame.\n    \\param \"video\"  \"Pointer to the AVCEncObject structure.\"\n    \\return \"void\"\n    */\n    void RCInitFrameQP(AVCEncObject *video);\n\n    /**\n    This function calculates QP for the upcoming frame or basic unit.\n    \\param \"encvid\" \"Pointer to the encoder object.\"\n    \\param \"rateCtrl\" \"Pointer to the rate control object.\"\n    \\return \"QP value ranging from 0-51.\"\n    */\n    int  RCCalculateQP(AVCEncObject *encvid, AVCRateControl *rateCtrl);\n\n    /**\n    This function translates the luma QP to chroma QP and calculates lambda based on QP.\n    \\param \"video\"  \"Pointer to the AVCEncObject structure.\"\n    \\return \"void\"\n    */\n    void RCInitChromaQP(AVCEncObject *encvid);\n\n    /**\n    This function is called before encoding each macroblock.\n    \\param \"encvid\" \"Pointer to the encoder object.\"\n    \\return \"void.\"\n    */\n    void RCInitMBQP(AVCEncObject *encvid);\n\n    /**\n    This function updates bits usage stats after encoding an macroblock.\n    \\param \"video\" \"Pointer to AVCCommonObj.\"\n    \\param \"rateCtrl\" \"Pointer to AVCRateControl.\"\n    \\param \"num_header_bits\" \"Number of bits used for MB header.\"\n    \\param \"num_texture_bits\" \"Number of bits used for MB texture.\"\n    \\return \"void\"\n    */\n    void RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits);\n\n    /**\n    This function calculates the difference between prediction and original MB.\n    \\param \"encvid\" \"Pointer to the encoder object.\"\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    \\param \"orgL\" \"Pointer to the original MB.\"\n    \\param \"orgPitch\" \"Pointer to the original 
picture pitch.\"\n    \\return \"void.\"\n    */\n    void RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch);\n\n    /**\n    Restore QP related parameters of previous MB when current MB is skipped.\n    \\param \"currMB\" \"Pointer to the current macroblock.\"\n    \\param \"video\"  \"Pointer to the common video structure.\"\n    \\param \"encvid\" \"Pointer to the global encoding structure.\"\n    \\return \"void\"\n    */\n    void RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid);\n\n    /**\n    This function is called after done with a frame.\n    \\param \"encvid\" \"Pointer to the encoder object.\"\n    \\return \"AVCENC_SUCCESS or AVCENC_SKIPPED_PICTURE when bufer overflow (need to discard current frame).\"\n    */\n    AVCEnc_Status RCUpdateFrame(AVCEncObject *encvid);\n\n    /**\n    This function is called to update the RC internal variables when bit rate/frame rate is changed.\n    \\param \"rateCtrl\"   \"Pointer to the rate control structure.\"\n    \\param \"encvid\"     \"Pointer to AVCEncObject.\"\n    \\return \"void\"\n    */\n    void RCUpdateParams(AVCRateControl *rateCtrl, AVCEncObject *encvid);\n\n\n    /*--------- residual.c -------------------*/\n\n    /**\n    This function encodes the intra pcm data and fill it in the corresponding location\n    on the current picture.\n    \\param \"video\"  \"Pointer to AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS if success, or else for bitstream errors.\"\n    */\n    AVCEnc_Status EncodeIntraPCM(AVCEncObject *video);\n\n    /**\n    This function performs CAVLC syntax encoding on the run and level information of the coefficients.\n    The level and run arrays are elements in AVCEncObject structure, populated by TransQuantZZ,\n    TransQuantIntraDC and TransQuantChromaDC functions.\n    \\param \"video\"  \"Pointer to AVCEncObject.\"\n    \\param \"type\"   \"One of AVCResidualType for a particular 4x4 block.\"\n    \\param \"bindx\" 
 \"Block index or number of nonzero coefficients for AVC_Intra16DC and AVC_ChromaDC mode.\"\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    \\return \"AVCENC_SUCCESS for success.\"\n    \\Note   \"This function has 32-bit machine specific instruction!!!!\"\n    */\n    AVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int bindx, AVCMacroblock *currMB);\n\n\n    /*------------- sad.c ---------------------------*/\n\n\n    int AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n\n#ifdef HTFM /*  3/2/1, Hypothesis Testing Fast Matching */\n    int AVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);\n    int AVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);\n    int AVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_x, void *extra_info);\n    int AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n    int AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info);\n#endif\n\n\n    /*------------- slice.c -------------------------*/\n\n    /**\n    This function performs the main encoding loop for a slice.\n    \\param \"encvid\" \"Pointer to AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS for success, AVCENC_PICTURE_READY for end-of-picture and\n             AVCENC_FAIL or AVCENC_SLICE_EMPTY otherwise.\"\n    */\n    AVCEnc_Status AVCEncodeSlice(AVCEncObject 
*encvid);\n\n    /**\n    This function performs the main encoding operation for one macroblock.\n    \\param \"video\" \"pointer to AVCEncObject.\"\n    \\return \"AVCENC_SUCCESS for success, or other bitstream related failure status.\"\n    */\n    AVCEnc_Status EncodeMB(AVCEncObject *video);\n\n    /**\n    This function calls prediction INTRA/INTER functions, transform,\n    quantization and zigzag scanning to get the run-level symbols.\n    \\param \"encvid\" \"pointer to AVCEncObject.\"\n    \\param \"curL\"   \"pointer to Luma component of the current frame.\n    \\param \"curCb\"  \"pointer to Cb component of the current frame.\n    \\param \"curCr\"  \"pointer to Cr component of the current frame.\n    \\return \"void for now.\"\n     */\n    void MBPredTransQuantZZ(AVCEncObject *encvid, uint8 *curL, uint8 *curCb, uint8 *curCr);\n\n    /**\n    This function copies the content of the prediction MB into the reconstructed YUV\n    frame directly.\n    \\param \"curL\"   \"Pointer to the destination Y component.\"\n    \\param \"curCb\"  \"Pointer to the destination Cb component.\"\n    \\param \"curCr\"  \"Pointer to the destination Cr component.\"\n    \\param \"predBlock\"  \"Pointer to the prediction MB.\"\n    \\param \"picWidth\"   \"The width of the frame.\"\n    \\return \"None.\"\n    */\n    void Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picWidth);\n\n    /**\n    This function encodes the mb_type, CBP, prediction mode, ref idx and MV.\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    \\param \"video\" \"Pointer to the AVCEncObject structure.\"\n    \\return \"AVCENC_SUCCESS for success or else for fail.\"\n    */\n    AVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *video);\n\n    /**\n    This function finds the right mb_type for a macroblock given the mbMode, CBP,\n    NumPart, PredPartMode.\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    
\\param \"slice_type\" \"Value of the slice_type.\"\n    \\return \"mb_type.\"\n    */\n    uint InterpretMBType(AVCMacroblock *currMB, int slice_type);\n\n    /**\n    This function encodes the mb_pred part of the macroblock data.\n    \\param \"video\"  \"Pointer to the AVCCommonObj structure.\"\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    \\param \"stream\" \"Pointer to the AVCEncBitstream structure.\"\n    \\return \"AVCENC_SUCCESS for success or bitstream fail status.\"\n    */\n    AVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);\n\n    /**\n    This function encodes the sub_mb_pred part of the macroblock data.\n    \\param \"video\"  \"Pointer to the AVCCommonObj structure.\"\n    \\param \"currMB\" \"Pointer to the current macroblock structure.\"\n    \\param \"stream\" \"Pointer to the AVCEncBitstream structure.\"\n    \\return \"AVCENC_SUCCESS for success or bitstream fail status.\"\n    */\n    AVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);\n\n    /**\n    This function interprets the sub_mb_type and sets necessary information\n    when the slice type is AVC_P_SLICE.\n    in the macroblock structure.\n    \\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n    \\param \"sub_mb_type\" \"From the syntax bitstream.\"\n    \\return \"void\"\n    */\n    void InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type);\n\n    /**\n    This function interprets the sub_mb_type and sets necessary information\n    when the slice type is AVC_B_SLICE.\n    in the macroblock structure.\n    \\param \"mblock\" \"Pointer to current AVCMacroblock.\"\n    \\param \"sub_mb_type\" \"From the syntax bitstream.\"\n    \\return \"void\"\n    */\n    void InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type);\n\n    /**\n    This function encodes intra 4x4 mode. 
It calculates the predicted I4x4 mode and the\n    remnant to be encoded.\n    \\param \"video\"  \"Pointer to AVCEncObject structure.\"\n    \\param \"currMB\" \"Pointer to the AVCMacroblock structure.\"\n    \\param \"stream\" \"Pointer to AVCEncBitstream sructure.\"\n    \\return \"AVCENC_SUCCESS for success.\"\n    */\n    AVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream);\n\n    /*------------- vlc_encode.c -----------------------*/\n    /**\n    This function encodes and writes a value into an Exp-Golomb codeword.\n    \\param \"bitstream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"codeNum\" \"Pointer to the value of the codeNum.\"\n    \\return \"AVCENC_SUCCESS for success or bitstream error messages for fail.\"\n    */\n    AVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum);\n\n    /**\n    This function maps and encodes signed Exp-Golomb codes.\n    \\param \"bitstream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"value\"  \"Pointer to syntax element value.\"\n    \\return \"AVCENC_SUCCESS or AVCENC_FAIL.\"\n    */\n    AVCEnc_Status  se_v(AVCEncBitstream *bitstream, int value);\n\n    /**\n    This function maps and encodes truncated Exp-Golomb codes.\n    \\param \"bitstream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"value\"  \"Pointer to syntax element value.\"\n    \\param \"range\"  \"Range of the value as input to determine the algorithm.\"\n    \\return \"AVCENC_SUCCESS or AVCENC_FAIL.\"\n    */\n    AVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range);\n\n    /**\n    This function creates Exp-Golomb codeword from codeNum.\n    \\param \"bitstream\" \"Pointer to AVCEncBitstream.\"\n    \\param \"codeNum\" \"Pointer to the codeNum value.\"\n    \\return \"AVCENC_SUCCESS for success or bitstream error messages for fail.\"\n    */\n    AVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum);\n\n    /**\n    This function performs CAVLC encoding 
of the CBP (coded block pattern) of a macroblock\n    by calling ue_v() and then mapping the CBP to the corresponding VLC codeNum.\n    \\param \"currMB\"  \"Pointer to the current AVCMacroblock structure.\"\n    \\param \"stream\"  \"Pointer to the AVCEncBitstream.\"\n    \\return \"void\"\n    */\n    AVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream);\n\n    /**\n    This function encodes trailing ones and total coefficient.\n    \\param \"stream\" \"Pointer to the AVCEncBitstream.\"\n    \\param \"TrailingOnes\"   \"The trailing one variable output.\"\n    \\param \"TotalCoeff\" \"The total coefficient variable output.\"\n    \\param \"nC\" \"Context for number of nonzero coefficient (prediction context).\"\n    \\return \"AVCENC_SUCCESS for success or else for bitstream failure.\"\n    */\n    AVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC);\n\n    /**\n    This function encodes trailing ones and total coefficient for chroma DC block.\n    \\param \"stream\" \"Pointer to the AVCEncBitstream.\"\n    \\param \"TrailingOnes\"   \"The trailing one variable output.\"\n    \\param \"TotalCoeff\" \"The total coefficient variable output.\"\n    \\return \"AVCENC_SUCCESS for success or else for bitstream failure.\"\n    */\n    AVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff);\n\n    /**\n    This function encodes total_zeros value as in Table 9-7 and 9-8.\n    \\param \"stream\" \"Pointer to the AVCEncBitstream.\"\n    \\param \"TotalZeros\" \"The total_zeros value.\"\n    \\param \"TotalCoeff\" \"The total coefficient variable output.\"\n    \\return \"AVCENC_SUCCESS for success or else for bitstream failure.\"\n    */\n    AVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);\n\n    /**\n    This function encodes total_zeros VLC syntax for chroma DC as in Table 9-9.\n    \\param \"stream\" 
\"Pointer to the AVCEncBitstream.\"\n    \\param \"TotalZeros\" \"The total_zeros value.\"\n    \\param \"TotalCoeff\" \"The total coefficient variable output.\"\n    \\return \"AVCENC_SUCCESS for success or else for bitstream failure.\"\n    */\n    AVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff);\n\n    /**\n    This function encodes run_before VLC syntax as in Table 9-10.\n    \\param \"stream\" \"Pointer to the AVCEncBitstream.\"\n    \\param \"run_before\" \"The run_before value.\"\n    \\param \"zerosLeft\"  \"The context for number of zeros left.\"\n    \\return \"AVCENC_SUCCESS for success or else for bitstream failure.\"\n    */\n    AVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft);\n\n#ifdef __cplusplus\n}\n#endif\n\n\n#endif /* _AVCENC_LIB_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/bitstream_io.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"oscl_mem.h\"\n\n#define WORD_SIZE 32\n\n/* array for trailing bit pattern as function of number of bits */\n/* the first one is unused. */\nconst static uint8 trailing_bits[9] = {0, 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80};\n\n/* ======================================================================== */\n/*  Function : BitstreamInit()                                              */\n/*  Date     : 11/4/2003                                                    */\n/*  Purpose  : Populate bitstream structure with bitstream buffer and size  */\n/*             it also initializes internal data                            */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS if successed, AVCENC_FAIL if failed.              
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n/* |--------|--------|----~~~~~-----|---------|---------|---------|\n   ^                                          ^write_pos          ^buf_size\n   bitstreamBuffer                  <--------->\n                                    current_word\n\n   |-----xxxxxxxxxxxxx|  = current_word 32 or 16 bits\n    <---->\n     bit_left\n ======================================================================== */\n\nAVCEnc_Status BitstreamEncInit(AVCEncBitstream *stream, uint8 *buffer, int buf_size,\n                               uint8 *overrunBuffer, int oBSize)\n{\n    if (stream == NULL || buffer == NULL || buf_size <= 0)\n    {\n        return AVCENC_BITSTREAM_INIT_FAIL;\n    }\n\n    stream->bitstreamBuffer = buffer;\n\n    stream->buf_size = buf_size;\n\n    stream->write_pos = 0;\n\n    stream->count_zeros = 0;\n\n    stream->current_word = 0;\n\n    stream->bit_left = WORD_SIZE;\n\n    stream->overrunBuffer = overrunBuffer;\n\n    stream->oBSize = oBSize;\n\n    return AVCENC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : AVCBitstreamSaveWord()                                           */\n/*  Date     : 3/29/2004                                                    */\n/*  Purpose  : Save the current_word into the buffer, byte-swap, and        */\n/*              add emulation prevention insertion.                         */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */\n/*              full.                                                       
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCEnc_Status AVCBitstreamSaveWord(AVCEncBitstream *stream)\n{\n    int num_bits;\n    uint8 *write_pnt, byte;\n    uint current_word;\n\n    /* check number of bytes in current_word, must always be byte-aligned!!!! */\n    num_bits = WORD_SIZE - stream->bit_left; /* must be multiple of 8 !!*/\n\n    if (stream->buf_size - stream->write_pos <= (num_bits >> 3) + 2) /* 2 more bytes for possible EPBS */\n    {\n        if (AVCENC_SUCCESS != AVCBitstreamUseOverrunBuffer(stream, (num_bits >> 3) + 2))\n        {\n            return AVCENC_BITSTREAM_BUFFER_FULL;\n        }\n    }\n\n    /* write word, byte-by-byte */\n    write_pnt = stream->bitstreamBuffer + stream->write_pos;\n    current_word = stream->current_word;\n    while (num_bits) /* no need to check stream->buf_size and stream->write_pos, taken care already */\n    {\n        num_bits -= 8;\n        byte = (current_word >> num_bits) & 0xFF;\n        if (byte != 0)\n        {\n            *write_pnt++ = byte;\n            stream->write_pos++;\n            stream->count_zeros = 0;\n        }\n        else\n        {\n            stream->count_zeros++;\n            *write_pnt++ = byte;\n            stream->write_pos++;\n            if (stream->count_zeros == 2)\n            {   /* for num_bits = 32, this can add 2 more bytes extra for EPBS */\n                *write_pnt++ = 0x3;\n                stream->write_pos++;\n                stream->count_zeros = 0;\n            }\n        }\n    }\n\n    /* reset current_word and bit_left */\n    stream->current_word = 0;\n    stream->bit_left = WORD_SIZE;\n\n    return AVCENC_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamWriteBits()                                         */\n/*  Date     : 3/29/2004                                        
            */\n/*  Purpose  : Write up to machine word.                                    */\n/*  In/out   : Unused bits in 'code' must be all zeros.                     */\n/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */\n/*              full.                                                       */\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCEnc_Status BitstreamWriteBits(AVCEncBitstream *stream, int nBits, uint code)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    int bit_left = stream->bit_left;\n    uint current_word = stream->current_word;\n\n    //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,\"BitstreamWriteBits\",nBits,-1);\n\n    if (nBits > WORD_SIZE) /* has to be taken care of specially */\n    {\n        return AVCENC_FAIL; /* for now */\n        /* otherwise, break it down to 2 write of less than 16 bits at a time. */\n    }\n\n    if (nBits <= bit_left) /* more bits left in current_word */\n    {\n        stream->current_word = (current_word << nBits) | code;\n        stream->bit_left -= nBits;\n        if (stream->bit_left == 0) /* prepare for the next word */\n        {\n            status = AVCBitstreamSaveWord(stream);\n            return status;\n        }\n    }\n    else\n    {\n        stream->current_word = (current_word << bit_left) | (code >> (nBits - bit_left));\n\n        nBits -= bit_left;\n\n        stream->bit_left = 0;\n\n        status = AVCBitstreamSaveWord(stream); /* save current word */\n\n        stream->bit_left = WORD_SIZE - nBits;\n\n        stream->current_word = code; /* no extra masking for code, must be handled before saving */\n    }\n\n    return status;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamWrite1Bit()                                         */\n/*  Date     : 3/30/2004                                  
                  */\n/*  Purpose  : Write 1 bit                                                  */\n/*  In/out   : Unused bits in 'code' must be all zeros.                     */\n/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */\n/*              full.                                                       */\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCEnc_Status BitstreamWrite1Bit(AVCEncBitstream *stream, uint code)\n{\n    AVCEnc_Status status;\n    uint current_word = stream->current_word;\n\n    //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,\"BitstreamWrite1Bit\",code,-1);\n\n    //if(1 <= bit_left) /* more bits left in current_word */\n    /* we can assume that there always be positive bit_left in the current word */\n    stream->current_word = (current_word << 1) | code;\n    stream->bit_left--;\n    if (stream->bit_left == 0) /* prepare for the next word */\n    {\n        status = AVCBitstreamSaveWord(stream);\n        return status;\n    }\n\n    return AVCENC_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamTrailingBits()                                      */\n/*  Date     : 3/31/2004                                                    */\n/*  Purpose  : Add trailing bits and report the final EBSP size.            */\n/*  In/out   :                                                              */\n/*  Return   : AVCENC_SUCCESS if successed, AVCENC_WRITE_FAIL if buffer is  */\n/*              full.                                                       
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nAVCEnc_Status BitstreamTrailingBits(AVCEncBitstream *bitstream, uint *nal_size)\n{\n    (void)(nal_size);\n\n    AVCEnc_Status status;\n    int bit_left = bitstream->bit_left;\n\n    bit_left &= 0x7; /* modulo by 8 */\n    if (bit_left == 0) bit_left = 8;\n    /* bitstream->bit_left == 0 cannot happen here since it would have been Saved already */\n\n    status = BitstreamWriteBits(bitstream, bit_left, trailing_bits[bit_left]);\n\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    /* if it's not saved, save it. */\n    //if(bitstream->bit_left<(WORD_SIZE<<3)) /* in fact, no need to check */\n    {\n        status = AVCBitstreamSaveWord(bitstream);\n    }\n\n    return status;\n}\n\n/* check whether it's byte-aligned */\nbool byte_aligned(AVCEncBitstream *stream)\n{\n    if (stream->bit_left % 8)\n        return false;\n    else\n        return true;\n}\n\n\n/* determine whether overrun buffer can be used or not */\nAVCEnc_Status AVCBitstreamUseOverrunBuffer(AVCEncBitstream* stream, int numExtraBytes)\n{\n    AVCEncObject *encvid = (AVCEncObject*)stream->encvid;\n\n    if (stream->overrunBuffer != NULL) // overrunBuffer is set\n    {\n        if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used\n        {\n            if (stream->write_pos + numExtraBytes >= stream->oBSize)\n            {\n                stream->oBSize = stream->write_pos + numExtraBytes + 100;\n                stream->oBSize &= (~0x3); // make it multiple of 4\n\n                // allocate new overrun Buffer\n                if (encvid->overrunBuffer)\n                {\n                    encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData,\n                                                  (int)encvid->overrunBuffer);\n                }\n\n                encvid->oBSize 
= stream->oBSize;\n                encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,\n                                        stream->oBSize, DEFAULT_ATTR);\n\n                stream->overrunBuffer = encvid->overrunBuffer;\n                if (stream->overrunBuffer == NULL)\n                {\n                    return AVCENC_FAIL;\n                }\n            }\n\n            // copy everything to overrun buffer and start using it.\n            oscl_memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->write_pos);\n            stream->bitstreamBuffer = stream->overrunBuffer;\n            stream->buf_size = stream->oBSize;\n        }\n        else // overrun buffer is already used\n        {\n            stream->oBSize = stream->write_pos + numExtraBytes + 100;\n            stream->oBSize &= (~0x3); // make it multiple of 4\n\n            // allocate new overrun buffer\n            encvid->oBSize = stream->oBSize;\n            encvid->overrunBuffer = (uint8*) encvid->avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,\n                                    stream->oBSize, DEFAULT_ATTR);\n\n            if (encvid->overrunBuffer == NULL)\n            {\n                return AVCENC_FAIL;\n            }\n\n\n            // copy from the old buffer to new buffer\n            oscl_memcpy(encvid->overrunBuffer, stream->overrunBuffer, stream->write_pos);\n            // free old buffer\n            encvid->avcHandle->CBAVC_Free((uint32*)encvid->avcHandle->userData,\n                                          (int)stream->overrunBuffer);\n\n            // assign pointer to new buffer\n            stream->overrunBuffer = encvid->overrunBuffer;\n            stream->bitstreamBuffer = stream->overrunBuffer;\n            stream->buf_size = stream->oBSize;\n        }\n\n        return AVCENC_SUCCESS;\n    }\n    else // overrunBuffer is not enable.\n    {\n        return AVCENC_FAIL;\n    }\n\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/block.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_mem.h\"\n#include \"avcenc_lib.h\"\n\n/* subtract with the prediction and do transformation */\nvoid trans(uint8 *cur, int pitch, uint8 *predBlock, int16 *dataBlock)\n{\n    int16 *ptr = dataBlock;\n    int r0, r1, r2, r3, j;\n    int curpitch = (uint)pitch >> 16;\n    int predpitch = (pitch & 0xFFFF);\n\n    /* horizontal */\n    j = 4;\n    while (j > 0)\n    {\n        /* calculate the residue first */\n        r0 = cur[0] - predBlock[0];\n        r1 = cur[1] - predBlock[1];\n        r2 = cur[2] - predBlock[2];\n        r3 = cur[3] - predBlock[3];\n\n        r0 += r3;           //ptr[0] + ptr[3];\n        r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];\n        r1 += r2;           //ptr[1] + ptr[2];\n        r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];\n\n        ptr[0] = r0 + r1;\n        ptr[2] = r0 - r1;\n        ptr[1] = (r3 << 1) + r2;\n        ptr[3] = r3 - (r2 << 1);\n\n        ptr += 16;\n        predBlock += predpitch;\n        cur += curpitch;\n        j--;\n    }\n    /* vertical */\n    ptr = dataBlock;\n    j = 4;\n    while (j > 0)\n    {\n        r0 = ptr[0] + ptr[48];\n        r3 = ptr[0] - ptr[48];\n        r1 = ptr[16] + ptr[32];\n        r2 = ptr[16] - ptr[32];\n\n   
     ptr[0] = r0 + r1;\n        ptr[32] = r0 - r1;\n        ptr[16] = (r3 << 1) + r2;\n        ptr[48] = r3 - (r2 << 1);\n\n        ptr++;\n        j--;\n    }\n\n    return ;\n}\n\n\n/* do residue transform quant invquant, invtrans and write output out */\nint dct_luma(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org, int *coef_cost)\n{\n    AVCCommonObj *video = encvid->common;\n    int org_pitch = encvid->currInput->pitch;\n    int pitch = video->currPic->pitch;\n    int16 *coef = video->block;\n    uint8 *pred = video->pred_block; // size 16 for a 4x4 block\n    int pred_pitch = video->pred_pitch;\n    int r0, r1, r2, r3, j, k, idx;\n    int *level, *run;\n    int Qq, Rq, q_bits, qp_const, quant;\n    int data, lev, zero_run;\n    int numcoeff;\n\n    coef += ((blkidx & 0x3) << 2) + ((blkidx >> 2) << 6); /* point to the 4x4 block */\n\n    /* first take a 4x4 transform */\n    /* horizontal */\n    j = 4;\n    while (j > 0)\n    {\n        /* calculate the residue first */\n        r0 = org[0] - pred[0];   /* OPTIMIZEABLE */\n        r1 = org[1] - pred[1];\n        r2 = org[2] - pred[2];\n        r3 = org[3] - pred[3];\n\n        r0 += r3;           //ptr[0] + ptr[3];\n        r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];\n        r1 += r2;           //ptr[1] + ptr[2];\n        r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];\n\n        coef[0] = r0 + r1;\n        coef[2] = r0 - r1;\n        coef[1] = (r3 << 1) + r2;\n        coef[3] = r3 - (r2 << 1);\n\n        coef += 16;\n        org += org_pitch;\n        pred += pred_pitch;\n        j--;\n    }\n    /* vertical */\n    coef -= 64;\n    pred -= (pred_pitch << 2);\n    j = 4;\n    while (j > 0)   /* OPTIMIZABLE */\n    {\n        r0 = coef[0] + coef[48];\n        r3 = coef[0] - coef[48];\n        r1 = coef[16] + coef[32];\n        r2 = coef[16] - coef[32];\n\n        coef[0] = r0 + r1;\n        coef[32] = r0 - r1;\n        coef[16] = (r3 << 1) + r2;\n        coef[48] = r3 - (r2 << 1);\n\n        coef++;\n     
   j--;\n    }\n\n    coef -= 4;\n\n    /* quant */\n    level = encvid->level[ras2dec[blkidx]];\n    run = encvid->run[ras2dec[blkidx]];\n\n    Rq = video->QPy_mod_6;\n    Qq = video->QPy_div_6;\n    qp_const = encvid->qp_const;\n    q_bits = 15 + Qq;\n\n    zero_run = 0;\n    numcoeff = 0;\n    for (k = 0; k < 16; k++)\n    {\n        idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */\n        data = coef[idx];\n        quant = quant_coef[Rq][k];\n        if (data > 0)\n        {\n            lev = data * quant + qp_const;\n        }\n        else\n        {\n            lev = -data * quant + qp_const;\n        }\n        lev >>= q_bits;\n        if (lev)\n        {\n            *coef_cost += ((lev > 1) ? MAX_VALUE : COEFF_COST[DISABLE_THRESHOLDING][zero_run]);\n\n            /* dequant */\n            quant = dequant_coefres[Rq][k];\n            if (data > 0)\n            {\n                level[numcoeff] = lev;\n                coef[idx] = (lev * quant) << Qq;\n            }\n            else\n            {\n                level[numcoeff] = -lev;\n                coef[idx] = (-lev * quant) << Qq;\n            }\n            run[numcoeff++] = zero_run;\n            zero_run = 0;\n        }\n        else\n        {\n            zero_run++;\n            coef[idx] = 0;\n        }\n    }\n\n    if (video->currMB->mb_intra) // only do inverse transform with intra block\n    {\n        if (numcoeff) /* then do inverse transform */\n        {\n            for (j = 4; j > 0; j--) /* horizontal */\n            {\n                r0 = coef[0] + coef[2];\n                r1 = coef[0] - coef[2];\n                r2 = (coef[1] >> 1) - coef[3];\n                r3 = coef[1] + (coef[3] >> 1);\n\n                coef[0] = r0 + r3;\n                coef[1] = r1 + r2;\n                coef[2] = r1 - r2;\n                coef[3] = r0 - r3;\n\n                coef += 16;\n            }\n\n            coef -= 64;\n            for (j = 4; j > 0; j--) /* vertical, has to be 
done after horizontal */\n            {\n                r0 = coef[0] + coef[32];\n                r1 = coef[0] - coef[32];\n                r2 = (coef[16] >> 1) - coef[48];\n                r3 = coef[16] + (coef[48] >> 1);\n                r0 += r3;\n                r3 = (r0 - (r3 << 1)); /* r0-r3 */\n                r1 += r2;\n                r2 = (r1 - (r2 << 1)); /* r1-r2 */\n                r0 += 32;\n                r1 += 32;\n                r2 += 32;\n                r3 += 32;\n\n                r0 = pred[0] + (r0 >> 6);\n                if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                r1 = *(pred += pred_pitch) + (r1 >> 6);\n                if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                r2 = *(pred += pred_pitch) + (r2 >> 6);\n                if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                r3 = pred[pred_pitch] + (r3 >> 6);\n                if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n\n                *cur = r0;\n                *(cur += pitch) = r1;\n                *(cur += pitch) = r2;\n                cur[pitch] = r3;\n                cur -= (pitch << 1);\n                cur++;\n                pred -= (pred_pitch << 1);\n                pred++;\n                coef++;\n            }\n        }\n        else  // copy from pred to cur\n        {\n            *((uint32*)cur) = *((uint32*)pred);\n            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));\n            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));\n            *((uint32*)(cur += pitch)) = *((uint32*)(pred += pred_pitch));\n        }\n    }\n\n    return numcoeff;\n}\n\n\nvoid MBInterIdct(AVCCommonObj *video, uint8 *curL, AVCMacroblock *currMB, int picPitch)\n{\n    int16 *coef, *coef8 = video->block;\n    uint8 *cur;  // the same as curL\n    int b8, b4;\n    int r0, r1, r2, r3, j, blkidx;\n\n    for (b8 = 0; b8 < 4; b8++)\n    {\n        cur = 
curL;\n        coef = coef8;\n\n        if (currMB->CBP&(1 << b8))\n        {\n            for (b4 = 0; b4 < 4; b4++)\n            {\n                blkidx = blkIdx2blkXY[b8][b4];\n                /* do IDCT */\n                if (currMB->nz_coeff[blkidx])\n                {\n                    for (j = 4; j > 0; j--) /* horizontal */\n                    {\n                        r0 = coef[0] + coef[2];\n                        r1 = coef[0] - coef[2];\n                        r2 = (coef[1] >> 1) - coef[3];\n                        r3 = coef[1] + (coef[3] >> 1);\n\n                        coef[0] = r0 + r3;\n                        coef[1] = r1 + r2;\n                        coef[2] = r1 - r2;\n                        coef[3] = r0 - r3;\n\n                        coef += 16;\n                    }\n\n                    coef -= 64;\n                    for (j = 4; j > 0; j--) /* vertical, has to be done after horizontal */\n                    {\n                        r0 = coef[0] + coef[32];\n                        r1 = coef[0] - coef[32];\n                        r2 = (coef[16] >> 1) - coef[48];\n                        r3 = coef[16] + (coef[48] >> 1);\n                        r0 += r3;\n                        r3 = (r0 - (r3 << 1)); /* r0-r3 */\n                        r1 += r2;\n                        r2 = (r1 - (r2 << 1)); /* r1-r2 */\n                        r0 += 32;\n                        r1 += 32;\n                        r2 += 32;\n                        r3 += 32;\n\n                        r0 = cur[0] + (r0 >> 6);\n                        if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                        *cur = r0;\n                        r1 = *(cur += picPitch) + (r1 >> 6);\n                        if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                        *cur = r1;\n                        r2 = *(cur += picPitch) + (r2 >> 6);\n                        if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 
>> 31));  /* clip */\n                        *cur = r2;\n                        r3 = cur[picPitch] + (r3 >> 6);\n                        if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                        cur[picPitch] = r3;\n\n                        cur -= (picPitch << 1);\n                        cur++;\n                        coef++;\n                    }\n                    cur -= 4;\n                    coef -= 4;\n                }\n                if (b4&1)\n                {\n                    cur += ((picPitch << 2) - 4);\n                    coef += 60;\n                }\n                else\n                {\n                    cur += 4;\n                    coef += 4;\n                }\n            }\n        }\n\n        if (b8&1)\n        {\n            curL += ((picPitch << 3) - 8);\n            coef8 += 120;\n        }\n        else\n        {\n            curL += 8;\n            coef8 += 8;\n        }\n    }\n\n    return ;\n}\n\n/* performa dct, quant, iquant, idct for the entire MB */\nvoid dct_luma_16x16(AVCEncObject *encvid, uint8 *curL, uint8 *orgL)\n{\n    AVCCommonObj *video = encvid->common;\n    int pitch = video->currPic->pitch;\n    int org_pitch = encvid->currInput->pitch;\n    AVCMacroblock *currMB = video->currMB;\n    int16 *coef = video->block;\n    uint8 *pred = encvid->pred_i16[currMB->i16Mode];\n    int blk_x, blk_y, j, k, idx, b8, b4;\n    int r0, r1, r2, r3, m0, m1, m2 , m3;\n    int data, lev;\n    int *level, *run, zero_run, ncoeff;\n    int Rq, Qq, quant, q_bits, qp_const;\n    int offset_cur[4], offset_pred[4], offset;\n\n    /* horizontal */\n    for (j = 16; j > 0; j--)\n    {\n        for (blk_x = 4; blk_x > 0; blk_x--)\n        {\n            /* calculate the residue first */\n            r0 = *orgL++ - *pred++;\n            r1 = *orgL++ - *pred++;\n            r2 = *orgL++ - *pred++;\n            r3 = *orgL++ - *pred++;\n\n            r0 += r3;           //ptr[0] + ptr[3];\n            
r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];\n            r1 += r2;           //ptr[1] + ptr[2];\n            r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];\n\n            *coef++ = r0 + r1;\n            *coef++ = (r3 << 1) + r2;\n            *coef++ = r0 - r1;\n            *coef++ = r3 - (r2 << 1);\n        }\n        orgL += (org_pitch - 16);\n    }\n    pred -= 256;\n    coef -= 256;\n    /* vertical */\n    for (blk_y = 4; blk_y > 0; blk_y--)\n    {\n        for (j = 16; j > 0; j--)\n        {\n            r0 = coef[0] + coef[48];\n            r3 = coef[0] - coef[48];\n            r1 = coef[16] + coef[32];\n            r2 = coef[16] - coef[32];\n\n            coef[0] = r0 + r1;\n            coef[32] = r0 - r1;\n            coef[16] = (r3 << 1) + r2;\n            coef[48] = r3 - (r2 << 1);\n\n            coef++;\n        }\n        coef += 48;\n    }\n\n    /* then perform DC transform */\n    coef -= 256;\n    for (j = 4; j > 0; j--)\n    {\n        r0 = coef[0] + coef[12];\n        r3 = coef[0] - coef[12];\n        r1 = coef[4] + coef[8];\n        r2 = coef[4] - coef[8];\n\n        coef[0] = r0 + r1;\n        coef[8] = r0 - r1;\n        coef[4] = r3 + r2;\n        coef[12] = r3 - r2;\n        coef += 64;\n    }\n    coef -= 256;\n    for (j = 4; j > 0; j--)\n    {\n        r0 = coef[0] + coef[192];\n        r3 = coef[0] - coef[192];\n        r1 = coef[64] + coef[128];\n        r2 = coef[64] - coef[128];\n\n        coef[0] = (r0 + r1) >> 1;\n        coef[128] = (r0 - r1) >> 1;\n        coef[64] = (r3 + r2) >> 1;\n        coef[192] = (r3 - r2) >> 1;\n        coef += 4;\n    }\n\n    coef -= 16;\n    // then quantize DC\n    level = encvid->leveldc;\n    run = encvid->rundc;\n\n    Rq = video->QPy_mod_6;\n    Qq = video->QPy_div_6;\n    quant = quant_coef[Rq][0];\n    q_bits = 15 + Qq;\n    qp_const = encvid->qp_const;\n\n    zero_run = 0;\n    ncoeff = 0;\n    for (k = 0; k < 16; k++) /* in zigzag scan order */\n    {\n        idx = ZIGZAG2RASTERDC[k];\n        data = 
coef[idx];\n        if (data > 0)   // quant\n        {\n            lev = data * quant + (qp_const << 1);\n        }\n        else\n        {\n            lev = -data * quant + (qp_const << 1);\n        }\n        lev >>= (q_bits + 1);\n        if (lev) // dequant\n        {\n            if (data > 0)\n            {\n                level[ncoeff] = lev;\n                coef[idx] = lev;\n            }\n            else\n            {\n                level[ncoeff] = -lev;\n                coef[idx] = -lev;\n            }\n            run[ncoeff++] = zero_run;\n            zero_run = 0;\n        }\n        else\n        {\n            zero_run++;\n            coef[idx] = 0;\n        }\n    }\n\n    /* inverse transform DC */\n    encvid->numcoefdc = ncoeff;\n    if (ncoeff)\n    {\n        quant = dequant_coefres[Rq][0];\n\n        for (j = 0; j < 4; j++)\n        {\n            m0 = coef[0] + coef[4];\n            m1 = coef[0] - coef[4];\n            m2 = coef[8] + coef[12];\n            m3 = coef[8] - coef[12];\n\n\n            coef[0] = m0 + m2;\n            coef[4] = m0 - m2;\n            coef[8] = m1 - m3;\n            coef[12] = m1 + m3;\n            coef += 64;\n        }\n\n        coef -= 256;\n\n        if (Qq >= 2)  /* this way should be faster than JM */\n        {           /* they use (((m4*scale)<<(QPy/6))+2)>>2 for both cases. 
*/\n            Qq -= 2;\n            for (j = 0; j < 4; j++)\n            {\n                m0 = coef[0] + coef[64];\n                m1 = coef[0] - coef[64];\n                m2 = coef[128] + coef[192];\n                m3 = coef[128] - coef[192];\n\n                coef[0] = ((m0 + m2) * quant) << Qq;\n                coef[64] = ((m0 - m2) * quant) << Qq;\n                coef[128] = ((m1 - m3) * quant) << Qq;\n                coef[192] = ((m1 + m3) * quant) << Qq;\n                coef += 4;\n            }\n            Qq += 2; /* restore the value */\n        }\n        else\n        {\n            Qq = 2 - Qq;\n            offset = 1 << (Qq - 1);\n\n            for (j = 0; j < 4; j++)\n            {\n                m0 = coef[0] + coef[64];\n                m1 = coef[0] - coef[64];\n                m2 = coef[128] + coef[192];\n                m3 = coef[128] - coef[192];\n\n                coef[0] = (((m0 + m2) * quant + offset) >> Qq);\n                coef[64] = (((m0 - m2) * quant + offset) >> Qq);\n                coef[128] = (((m1 - m3) * quant + offset) >> Qq);\n                coef[192] = (((m1 + m3) * quant + offset) >> Qq);\n                coef += 4;\n            }\n            Qq = 2 - Qq; /* restore the value */\n        }\n        coef -= 16; /* back to the origin */\n    }\n\n    /* now zigzag scan ac coefs, quant, iquant and itrans */\n    run = encvid->run[0];\n    level = encvid->level[0];\n\n    /* offset btw 4x4 block */\n    offset_cur[0] = 0;\n    offset_cur[1] = (pitch << 2) - 8;\n\n    /* offset btw 8x8 block */\n    offset_cur[2] = 8 - (pitch << 3);\n    offset_cur[3] = -8;\n\n    /* similarly for pred */\n    offset_pred[0] = 0;\n    offset_pred[1] = 56;\n    offset_pred[2] = -120;\n    offset_pred[3] = -8;\n\n    currMB->CBP = 0;\n\n    for (b8 = 0; b8 < 4; b8++)\n    {\n        for (b4 = 0; b4 < 4; b4++)\n        {\n\n            zero_run = 0;\n            ncoeff = 0;\n\n            for (k = 1; k < 16; k++)\n            {\n          
      idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */\n                data = coef[idx];\n                quant = quant_coef[Rq][k];\n                if (data > 0)\n                {\n                    lev = data * quant + qp_const;\n                }\n                else\n                {\n                    lev = -data * quant + qp_const;\n                }\n                lev >>= q_bits;\n                if (lev)\n                {   /* dequant */\n                    quant = dequant_coefres[Rq][k];\n                    if (data > 0)\n                    {\n                        level[ncoeff] = lev;\n                        coef[idx] = (lev * quant) << Qq;\n                    }\n                    else\n                    {\n                        level[ncoeff] = -lev;\n                        coef[idx] = (-lev * quant) << Qq;\n                    }\n                    run[ncoeff++] = zero_run;\n                    zero_run = 0;\n                }\n                else\n                {\n                    zero_run++;\n                    coef[idx] = 0;\n                }\n            }\n\n            currMB->nz_coeff[blkIdx2blkXY[b8][b4]] = ncoeff; /* in raster scan !!! 
*/\n            if (ncoeff)\n            {\n                currMB->CBP |= (1 << b8);\n\n                // do inverse transform here\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = coef[0] + coef[2];\n                    r1 = coef[0] - coef[2];\n                    r2 = (coef[1] >> 1) - coef[3];\n                    r3 = coef[1] + (coef[3] >> 1);\n\n                    coef[0] = r0 + r3;\n                    coef[1] = r1 + r2;\n                    coef[2] = r1 - r2;\n                    coef[3] = r0 - r3;\n\n                    coef += 16;\n                }\n                coef -= 64;\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = coef[0] + coef[32];\n                    r1 = coef[0] - coef[32];\n                    r2 = (coef[16] >> 1) - coef[48];\n                    r3 = coef[16] + (coef[48] >> 1);\n\n                    r0 += r3;\n                    r3 = (r0 - (r3 << 1)); /* r0-r3 */\n                    r1 += r2;\n                    r2 = (r1 - (r2 << 1)); /* r1-r2 */\n                    r0 += 32;\n                    r1 += 32;\n                    r2 += 32;\n                    r3 += 32;\n                    r0 = pred[0] + (r0 >> 6);\n                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                    r1 = pred[16] + (r1 >> 6);\n                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                    r2 = pred[32] + (r2 >> 6);\n                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                    r3 = pred[48] + (r3 >> 6);\n                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                    *curL = r0;\n                    *(curL += pitch) = r1;\n                    *(curL += pitch) = r2;\n                    curL[pitch] = r3;\n                    curL -= (pitch << 1);\n                    curL++;\n                    pred++;\n                    
coef++;\n                }\n            }\n            else  // do DC-only inverse\n            {\n                m0 = coef[0] + 32;\n\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = pred[0] + (m0 >> 6);\n                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                    r1 = pred[16] + (m0 >> 6);\n                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                    r2 = pred[32] + (m0 >> 6);\n                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                    r3 = pred[48] + (m0 >> 6);\n                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                    *curL = r0;\n                    *(curL += pitch) = r1;\n                    *(curL += pitch) = r2;\n                    curL[pitch] = r3;\n                    curL -= (pitch << 1);\n                    curL++;\n                    pred++;\n                }\n                coef += 4;\n            }\n\n            run += 16;  // follow coding order\n            level += 16;\n            curL += offset_cur[b4&1];\n            pred += offset_pred[b4&1];\n            coef += offset_pred[b4&1];\n        }\n\n        curL += offset_cur[2 + (b8&1)];\n        pred += offset_pred[2 + (b8&1)];\n        coef += offset_pred[2 + (b8&1)];\n    }\n\n    return ;\n}\n\n\nvoid dct_chroma(AVCEncObject *encvid, uint8 *curC, uint8 *orgC, int cr)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCMacroblock *currMB = video->currMB;\n    int org_pitch = (encvid->currInput->pitch) >> 1;\n    int pitch = (video->currPic->pitch) >> 1;\n    int pred_pitch = 16;\n    int16 *coef = video->block + 256;\n    uint8 *pred = video->pred_block;\n    int j, blk_x, blk_y, k, idx, b4;\n    int r0, r1, r2, r3, m0;\n    int Qq, Rq, qp_const, q_bits, quant;\n    int *level, *run, zero_run, ncoeff;\n    int data, lev;\n    int offset_cur[2], offset_pred[2], offset_coef[2];\n 
   uint8 nz_temp[4];\n    int  coeff_cost;\n\n    if (cr)\n    {\n        coef += 8;\n        pred += 8;\n    }\n\n    if (currMB->mb_intra == 0) // inter mode\n    {\n        pred = curC;\n        pred_pitch = pitch;\n    }\n\n    /* do 4x4 transform */\n    /* horizontal */\n    for (j = 8; j > 0; j--)\n    {\n        for (blk_x = 2; blk_x > 0; blk_x--)\n        {\n            /* calculate the residue first */\n            r0 = *orgC++ - *pred++;\n            r1 = *orgC++ - *pred++;\n            r2 = *orgC++ - *pred++;\n            r3 = *orgC++ - *pred++;\n\n            r0 += r3;           //ptr[0] + ptr[3];\n            r3 = r0 - (r3 << 1);    //ptr[0] - ptr[3];\n            r1 += r2;           //ptr[1] + ptr[2];\n            r2 = r1 - (r2 << 1);    //ptr[1] - ptr[2];\n\n            *coef++ = r0 + r1;\n            *coef++ = (r3 << 1) + r2;\n            *coef++ = r0 - r1;\n            *coef++ = r3 - (r2 << 1);\n\n        }\n        coef += 8; // coef pitch is 16\n        pred += (pred_pitch - 8); // pred_pitch is 16\n        orgC += (org_pitch - 8);\n    }\n    pred -= (pred_pitch << 3);\n    coef -= 128;\n    /* vertical */\n    for (blk_y = 2; blk_y > 0; blk_y--)\n    {\n        for (j = 8; j > 0; j--)\n        {\n            r0 = coef[0] + coef[48];\n            r3 = coef[0] - coef[48];\n            r1 = coef[16] + coef[32];\n            r2 = coef[16] - coef[32];\n\n            coef[0] = r0 + r1;\n            coef[32] = r0 - r1;\n            coef[16] = (r3 << 1) + r2;\n            coef[48] = r3 - (r2 << 1);\n\n            coef++;\n        }\n        coef += 56;\n    }\n    /* then perform DC transform */\n    coef -= 128;\n\n    /* 2x2 transform of DC components*/\n    r0 = coef[0];\n    r1 = coef[4];\n    r2 = coef[64];\n    r3 = coef[68];\n\n    coef[0] = r0 + r1 + r2 + r3;\n    coef[4] = r0 - r1 + r2 - r3;\n    coef[64] = r0 + r1 - r2 - r3;\n    coef[68] = r0 - r1 - r2 + r3;\n\n    Qq    = video->QPc_div_6;\n    Rq    = video->QPc_mod_6;\n    quant = 
quant_coef[Rq][0];\n    q_bits    = 15 + Qq;\n    qp_const = encvid->qp_const_c;\n\n    zero_run = 0;\n    ncoeff = 0;\n    run = encvid->runcdc + (cr << 2);\n    level = encvid->levelcdc + (cr << 2);\n\n    /* in zigzag scan order */\n    for (k = 0; k < 4; k++)\n    {\n        idx = ((k >> 1) << 6) + ((k & 1) << 2);\n        data = coef[idx];\n        if (data > 0)\n        {\n            lev = data * quant + (qp_const << 1);\n        }\n        else\n        {\n            lev = -data * quant + (qp_const << 1);\n        }\n        lev >>= (q_bits + 1);\n        if (lev)\n        {\n            if (data > 0)\n            {\n                level[ncoeff] = lev;\n                coef[idx] = lev;\n            }\n            else\n            {\n                level[ncoeff] = -lev;\n                coef[idx] = -lev;\n            }\n            run[ncoeff++] = zero_run;\n            zero_run = 0;\n        }\n        else\n        {\n            zero_run++;\n            coef[idx] = 0;\n        }\n    }\n\n    encvid->numcoefcdc[cr] = ncoeff;\n\n    if (ncoeff)\n    {\n        currMB->CBP |= (1 << 4); // DC present\n        // do inverse transform\n        quant = dequant_coefres[Rq][0];\n\n        r0 = coef[0] + coef[4];\n        r1 = coef[0] - coef[4];\n        r2 = coef[64] + coef[68];\n        r3 = coef[64] - coef[68];\n\n        r0 += r2;\n        r2 = r0 - (r2 << 1);\n        r1 += r3;\n        r3 = r1 - (r3 << 1);\n\n        if (Qq >= 1)\n        {\n            Qq -= 1;\n            coef[0] = (r0 * quant) << Qq;\n            coef[4] = (r1 * quant) << Qq;\n            coef[64] = (r2 * quant) << Qq;\n            coef[68] = (r3 * quant) << Qq;\n            Qq++;\n        }\n        else\n        {\n            coef[0] = (r0 * quant) >> 1;\n            coef[4] = (r1 * quant) >> 1;\n            coef[64] = (r2 * quant) >> 1;\n            coef[68] = (r3 * quant) >> 1;\n        }\n    }\n\n    /* now do AC zigzag scan, quant, iquant and itrans */\n    if (cr)\n    {\n   
     run = encvid->run[20];\n        level = encvid->level[20];\n    }\n    else\n    {\n        run = encvid->run[16];\n        level = encvid->level[16];\n    }\n\n    /* offset btw 4x4 block */\n    offset_cur[0] = 0;\n    offset_cur[1] = (pitch << 2) - 8;\n    offset_pred[0] = 0;\n    offset_pred[1] = (pred_pitch << 2) - 8;\n    offset_coef[0] = 0;\n    offset_coef[1] = 56;\n\n    coeff_cost = 0;\n\n    for (b4 = 0; b4 < 4; b4++)\n    {\n        zero_run = 0;\n        ncoeff = 0;\n        for (k = 1; k < 16; k++) /* in zigzag scan order */\n        {\n            idx = ZZ_SCAN_BLOCK[k]; /* map back to raster scan order */\n            data = coef[idx];\n            quant = quant_coef[Rq][k];\n            if (data > 0)\n            {\n                lev = data * quant + qp_const;\n            }\n            else\n            {\n                lev = -data * quant + qp_const;\n            }\n            lev >>= q_bits;\n            if (lev)\n            {\n                /* for RD performance*/\n                if (lev > 1)\n                    coeff_cost += MAX_VALUE;                // set high cost, shall not be discarded\n                else\n                    coeff_cost += COEFF_COST[DISABLE_THRESHOLDING][zero_run];\n\n                /* dequant */\n                quant = dequant_coefres[Rq][k];\n                if (data > 0)\n                {\n                    level[ncoeff] = lev;\n                    coef[idx] = (lev * quant) << Qq;\n                }\n                else\n                {\n                    level[ncoeff] = -lev;\n                    coef[idx] = (-lev * quant) << Qq;\n                }\n                run[ncoeff++] = zero_run;\n                zero_run = 0;\n            }\n            else\n            {\n                zero_run++;\n                coef[idx] = 0;\n            }\n        }\n\n        nz_temp[b4] = ncoeff; // raster scan\n\n        // just advance the pointers for now, do IDCT later\n        coef += 4;\n       
 run += 16;\n        level += 16;\n        coef += offset_coef[b4&1];\n    }\n\n    /* rewind the pointers */\n    coef -= 128;\n\n    if (coeff_cost < _CHROMA_COEFF_COST_)\n    {\n        /* if it's not efficient to encode any blocks.\n        Just do DC only */\n        /* We can reset level and run also, but setting nz to zero should be enough. */\n        currMB->nz_coeff[16+(cr<<1)] = 0;\n        currMB->nz_coeff[17+(cr<<1)] = 0;\n        currMB->nz_coeff[20+(cr<<1)] = 0;\n        currMB->nz_coeff[21+(cr<<1)] = 0;\n\n        for (b4 = 0; b4 < 4; b4++)\n        {\n            // do DC-only inverse\n            m0 = coef[0] + 32;\n\n            for (j = 4; j > 0; j--)\n            {\n                r0 = pred[0] + (m0 >> 6);\n                if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                r1 = *(pred += pred_pitch) + (m0 >> 6);\n                if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                r2 = pred[pred_pitch] + (m0 >> 6);\n                if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                r3 = pred[pred_pitch<<1] + (m0 >> 6);\n                if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                *curC = r0;\n                *(curC += pitch) = r1;\n                *(curC += pitch) = r2;\n                curC[pitch] = r3;\n                curC -= (pitch << 1);\n                curC++;\n                pred += (1 - pred_pitch);\n            }\n            coef += 4;\n            curC += offset_cur[b4&1];\n            pred += offset_pred[b4&1];\n            coef += offset_coef[b4&1];\n        }\n    }\n    else // not dropping anything, continue with the IDCT\n    {\n        for (b4 = 0; b4 < 4; b4++)\n        {\n            ncoeff = nz_temp[b4] ; // in raster scan\n            currMB->nz_coeff[16+(b4&1)+(cr<<1)+((b4>>1)<<2)] = ncoeff; // in raster scan\n\n            if (ncoeff) // do a check on the nonzero-coeff\n            {\n                
currMB->CBP |= (2 << 4);\n\n                // do inverse transform here\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = coef[0] + coef[2];\n                    r1 = coef[0] - coef[2];\n                    r2 = (coef[1] >> 1) - coef[3];\n                    r3 = coef[1] + (coef[3] >> 1);\n\n                    coef[0] = r0 + r3;\n                    coef[1] = r1 + r2;\n                    coef[2] = r1 - r2;\n                    coef[3] = r0 - r3;\n\n                    coef += 16;\n                }\n                coef -= 64;\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = coef[0] + coef[32];\n                    r1 = coef[0] - coef[32];\n                    r2 = (coef[16] >> 1) - coef[48];\n                    r3 = coef[16] + (coef[48] >> 1);\n\n                    r0 += r3;\n                    r3 = (r0 - (r3 << 1)); /* r0-r3 */\n                    r1 += r2;\n                    r2 = (r1 - (r2 << 1)); /* r1-r2 */\n                    r0 += 32;\n                    r1 += 32;\n                    r2 += 32;\n                    r3 += 32;\n                    r0 = pred[0] + (r0 >> 6);\n                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                    r1 = *(pred += pred_pitch) + (r1 >> 6);\n                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                    r2 = pred[pred_pitch] + (r2 >> 6);\n                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                    r3 = pred[pred_pitch<<1] + (r3 >> 6);\n                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                    *curC = r0;\n                    *(curC += pitch) = r1;\n                    *(curC += pitch) = r2;\n                    curC[pitch] = r3;\n                    curC -= (pitch << 1);\n                    curC++;\n                    pred += (1 - pred_pitch);\n                    coef++;\n    
            }\n            }\n            else\n            {\n                // do DC-only inverse\n                m0 = coef[0] + 32;\n\n                for (j = 4; j > 0; j--)\n                {\n                    r0 = pred[0] + (m0 >> 6);\n                    if ((uint)r0 > 0xFF)   r0 = 0xFF & (~(r0 >> 31));  /* clip */\n                    r1 = *(pred += pred_pitch) + (m0 >> 6);\n                    if ((uint)r1 > 0xFF)   r1 = 0xFF & (~(r1 >> 31));  /* clip */\n                    r2 = pred[pred_pitch] + (m0 >> 6);\n                    if ((uint)r2 > 0xFF)   r2 = 0xFF & (~(r2 >> 31));  /* clip */\n                    r3 = pred[pred_pitch<<1] + (m0 >> 6);\n                    if ((uint)r3 > 0xFF)   r3 = 0xFF & (~(r3 >> 31));  /* clip */\n                    *curC = r0;\n                    *(curC += pitch) = r1;\n                    *(curC += pitch) = r2;\n                    curC[pitch] = r3;\n                    curC -= (pitch << 1);\n                    curC++;\n                    pred += (1 - pred_pitch);\n                }\n                coef += 4;\n            }\n            curC += offset_cur[b4&1];\n            pred += offset_pred[b4&1];\n            coef += offset_coef[b4&1];\n        }\n    }\n\n    return ;\n}\n\n\n/* only DC transform */\nint TransQuantIntra16DC(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    int16 *block = video->block;\n    int *level = encvid->leveldc;\n    int *run = encvid->rundc;\n    int16 *ptr = block;\n    int r0, r1, r2, r3, j;\n    int Qq = video->QPy_div_6;\n    int Rq = video->QPy_mod_6;\n    int q_bits, qp_const, quant;\n    int data, lev, zero_run;\n    int k, ncoeff, idx;\n\n    /* DC transform */\n    /* horizontal */\n    j = 4;\n    while (j)\n    {\n        r0 = ptr[0] + ptr[12];\n        r3 = ptr[0] - ptr[12];\n        r1 = ptr[4] + ptr[8];\n        r2 = ptr[4] - ptr[8];\n\n        ptr[0] = r0 + r1;\n        ptr[8] = r0 - r1;\n        ptr[4] = r3 + r2;\n        ptr[12] = r3 - r2;\n 
       ptr += 64;\n        j--;\n    }\n    /* vertical */\n    ptr = block;\n    j = 4;\n    while (j)\n    {\n        r0 = ptr[0] + ptr[192];\n        r3 = ptr[0] - ptr[192];\n        r1 = ptr[64] + ptr[128];\n        r2 = ptr[64] - ptr[128];\n\n        ptr[0] = (r0 + r1) >> 1;\n        ptr[128] = (r0 - r1) >> 1;\n        ptr[64] = (r3 + r2) >> 1;\n        ptr[192] = (r3 - r2) >> 1;\n        ptr += 4;\n        j--;\n    }\n\n    quant = quant_coef[Rq][0];\n    q_bits    = 15 + Qq;\n    qp_const = (1 << q_bits) / 3;    // intra\n\n    zero_run = 0;\n    ncoeff = 0;\n\n    for (k = 0; k < 16; k++) /* in zigzag scan order */\n    {\n        idx = ZIGZAG2RASTERDC[k];\n        data = block[idx];\n        if (data > 0)\n        {\n            lev = data * quant + (qp_const << 1);\n        }\n        else\n        {\n            lev = -data * quant + (qp_const << 1);\n        }\n        lev >>= (q_bits + 1);\n        if (lev)\n        {\n            if (data > 0)\n            {\n                level[ncoeff] = lev;\n                block[idx] = lev;\n            }\n            else\n            {\n                level[ncoeff] = -lev;\n                block[idx] = -lev;\n            }\n            run[ncoeff++] = zero_run;\n            zero_run = 0;\n        }\n        else\n        {\n            zero_run++;\n            block[idx] = 0;\n        }\n    }\n    return ncoeff;\n}\n\nint TransQuantChromaDC(AVCEncObject *encvid, int16 *block, int slice_type, int cr)\n{\n    AVCCommonObj *video = encvid->common;\n    int *level, *run;\n    int r0, r1, r2, r3;\n    int Qq, Rq, q_bits, qp_const, quant;\n    int data, lev, zero_run;\n    int k, ncoeff, idx;\n\n    level = encvid->levelcdc + (cr << 2); /* cb or cr */\n    run = encvid->runcdc + (cr << 2);\n\n    /* 2x2 transform of DC components*/\n    r0 = block[0];\n    r1 = block[4];\n    r2 = block[64];\n    r3 = block[68];\n\n    block[0] = r0 + r1 + r2 + r3;\n    block[4] = r0 - r1 + r2 - r3;\n    block[64] = r0 + r1 - r2 
- r3;\n    block[68] = r0 - r1 - r2 + r3;\n\n    Qq    = video->QPc_div_6;\n    Rq    = video->QPc_mod_6;\n    quant = quant_coef[Rq][0];\n    q_bits    = 15 + Qq;\n    if (slice_type == AVC_I_SLICE)\n    {\n        qp_const = (1 << q_bits) / 3;\n    }\n    else\n    {\n        qp_const = (1 << q_bits) / 6;\n    }\n\n    zero_run = 0;\n    ncoeff = 0;\n\n    for (k = 0; k < 4; k++) /* in zigzag scan order */\n    {\n        idx = ((k >> 1) << 6) + ((k & 1) << 2);\n        data = block[idx];\n        if (data > 0)\n        {\n            lev = data * quant + (qp_const << 1);\n        }\n        else\n        {\n            lev = -data * quant + (qp_const << 1);\n        }\n        lev >>= (q_bits + 1);\n        if (lev)\n        {\n            if (data > 0)\n            {\n                level[ncoeff] = lev;\n                block[idx] = lev;\n            }\n            else\n            {\n                level[ncoeff] = -lev;\n                block[idx] = -lev;\n            }\n            run[ncoeff++] = zero_run;\n            zero_run = 0;\n        }\n        else\n        {\n            zero_run++;\n            block[idx] = 0;\n        }\n    }\n    return ncoeff;\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/findhalfpel.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"oscl_base_macros.h\"\n/* 3/29/01 fast half-pel search based on neighboring guess */\n/* value ranging from 0 to 4, high complexity (more accurate) to\n   low complexity (less accurate) */\n#define HP_DISTANCE_TH      5 // 2  /* half-pel distance threshold */\n\n#define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/\n\nconst static int distance_tab[9][9] =   /* [hp_guess][k] */\n{\n    {0, 1, 1, 1, 1, 1, 1, 1, 1},\n    {1, 0, 1, 2, 3, 4, 3, 2, 1},\n    {1, 0, 0, 0, 1, 2, 3, 2, 1},\n    {1, 2, 1, 0, 1, 2, 3, 4, 3},\n    {1, 2, 1, 0, 0, 0, 1, 2, 3},\n    {1, 4, 3, 2, 1, 0, 1, 2, 3},\n    {1, 2, 3, 2, 1, 0, 0, 0, 1},\n    {1, 2, 3, 4, 3, 2, 1, 0, 1},\n    {1, 0, 1, 2, 3, 2, 1, 0, 0}\n};\n\n#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \\\n                 x = 0xFF & (~(x>>31));}\n\n#define CLIP_UPPER16(x)     if((uint)x >= 0x20000000){ \\\n        x = 0xFF0000 & (~(x>>31));} \\\n        else { \\\n        x = (x>>5)&0xFF0000; \\\n        }\n\n/*=====================================================================\n    Function:   AVCFindHalfPelMB\n    Date:       10/31/2007\n    Purpose:    Find half pel resolution MV surrounding the full-pel 
MV\n=====================================================================*/\n\nint AVCFindHalfPelMB(AVCEncObject *encvid, uint8 *cur, AVCMV *mot, uint8 *ncand,\n                     int xpos, int ypos, int hp_guess, int cmvx, int cmvy)\n{\n    AVCPictureData *currPic = encvid->common->currPic;\n    int lx = currPic->pitch;\n    int d, dmin, satd_min;\n    uint8* cand;\n    int lambda_motion = encvid->lambda_motion;\n    uint8 *mvbits = encvid->mvbits;\n    int mvcost;\n    /* list of candidate to go through for half-pel search*/\n    uint8 *subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions\n    uint8 **hpel_cand = (uint8**) encvid->hpel_cand; /* half-pel position */\n\n    int xh[9] = {0, 0, 2, 2, 2, 0, -2, -2, -2};\n    int yh[9] = {0, -2, -2, 0, 2, 2, 2, 0, -2};\n    int xq[8] = {0, 1, 1, 1, 0, -1, -1, -1};\n    int yq[8] = { -1, -1, 0, 1, 1, 1, 0, -1};\n    int h, hmin, q, qmin;\n\n    OSCL_UNUSED_ARG(xpos);\n    OSCL_UNUSED_ARG(ypos);\n    OSCL_UNUSED_ARG(hp_guess);\n\n    GenerateHalfPelPred(subpel_pred, ncand, lx);\n\n    cur = encvid->currYMB; // pre-load current original MB\n\n    cand = hpel_cand[0];\n\n    // find cost for the current full-pel position\n    dmin = SATD_MB(cand, cur, 65535); // get Hadamaard transform SAD\n    mvcost = MV_COST_S(lambda_motion, mot->x, mot->y, cmvx, cmvy);\n    satd_min = dmin;\n    dmin += mvcost;\n    hmin = 0;\n\n    /* find half-pel */\n    for (h = 1; h < 9; h++)\n    {\n        d = SATD_MB(hpel_cand[h], cur, dmin);\n        mvcost = MV_COST_S(lambda_motion, mot->x + xh[h], mot->y + yh[h], cmvx, cmvy);\n        d += mvcost;\n\n        if (d < dmin)\n        {\n            dmin = d;\n            hmin = h;\n            satd_min = d - mvcost;\n        }\n    }\n\n    mot->sad = dmin;\n    mot->x += xh[hmin];\n    mot->y += yh[hmin];\n    encvid->best_hpel_pos = hmin;\n\n    /*** search for quarter-pel ****/\n    GenerateQuartPelPred(encvid->bilin_base[hmin], &(encvid->qpel_cand[0][0]), hmin);\n\n    
encvid->best_qpel_pos = qmin = -1;\n\n    for (q = 0; q < 8; q++)\n    {\n        d = SATD_MB(encvid->qpel_cand[q], cur, dmin);\n        mvcost = MV_COST_S(lambda_motion, mot->x + xq[q], mot->y + yq[q], cmvx, cmvy);\n        d += mvcost;\n        if (d < dmin)\n        {\n            dmin = d;\n            qmin = q;\n            satd_min = d - mvcost;\n        }\n    }\n\n    if (qmin != -1)\n    {\n        mot->sad = dmin;\n        mot->x += xq[qmin];\n        mot->y += yq[qmin];\n        encvid->best_qpel_pos = qmin;\n    }\n\n    return satd_min;\n}\n\n\n\n/** This function generates sub-pel prediction around the full-pel candidate.\nEach sub-pel position array is 20 pixel wide (for word-alignment) and 17 pixel tall. */\n/** The sub-pel position is labeled in spiral manner from the center. */\n\nvoid GenerateHalfPelPred(uint8* subpel_pred, uint8 *ncand, int lx)\n{\n    /* let's do straightforward way first */\n    uint8 *ref;\n    uint8 *dst;\n    uint8 tmp8;\n    int32 tmp32;\n    int16 tmp_horz[18*22], *dst_16, *src_16;\n    register int a = 0, b = 0, c = 0, d = 0, e = 0, f = 0; // temp register\n    int msk;\n    int i, j;\n\n    /* first copy full-pel to the first array */\n    /* to be optimized later based on byte-offset load */\n    ref = ncand - 3 - lx - (lx << 1); /* move back (-3,-3) */\n    dst = subpel_pred;\n\n    dst -= 4; /* offset */\n    for (j = 0; j < 22; j++) /* 24x22 */\n    {\n        i = 6;\n        while (i > 0)\n        {\n            tmp32 = *ref++;\n            tmp8 = *ref++;\n            tmp32 |= (tmp8 << 8);\n            tmp8 = *ref++;\n            tmp32 |= (tmp8 << 16);\n            tmp8 = *ref++;\n            tmp32 |= (tmp8 << 24);\n            *((uint32*)(dst += 4)) = tmp32;\n            i--;\n        }\n        ref += (lx - 24);\n    }\n\n    /* from the first array, we do horizontal interp */\n    ref = subpel_pred + 2;\n    dst_16 = tmp_horz; /* 17 x 22 */\n\n    for (j = 4; j > 0; j--)\n    {\n        for (i = 16; i > 0; i -= 
4)\n        {\n            a = ref[-2];\n            b = ref[-1];\n            c = ref[0];\n            d = ref[1];\n            e = ref[2];\n            f = ref[3];\n            *dst_16++ = a + f - 5 * (b + e) + 20 * (c + d);\n            a = ref[4];\n            *dst_16++ = b + a - 5 * (c + f) + 20 * (d + e);\n            b = ref[5];\n            *dst_16++ = c + b - 5 * (d + a) + 20 * (e + f);\n            c = ref[6];\n            *dst_16++ = d + c - 5 * (e + b) + 20 * (f + a);\n\n            ref += 4;\n        }\n        /* do the 17th column here */\n        d = ref[3];\n        *dst_16 =  e + d - 5 * (f + c) + 20 * (a + b);\n        dst_16 += 2; /* stride for tmp_horz is 18 */\n        ref += 8;  /* stride for ref is 24 */\n        if (j == 3)  // move 18 lines down\n        {\n            dst_16 += 324;//18*18;\n            ref += 432;//18*24;\n        }\n    }\n\n    ref -= 480;//20*24;\n    dst_16 -= 360;//20*18;\n    dst = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* go to the 14th array 17x18*/\n\n    for (j = 18; j > 0; j--)\n    {\n        for (i = 16; i > 0; i -= 4)\n        {\n            a = ref[-2];\n            b = ref[-1];\n            c = ref[0];\n            d = ref[1];\n            e = ref[2];\n            f = ref[3];\n            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);\n            *dst_16++ = tmp32;\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *dst++ = tmp32;\n\n            a = ref[4];\n            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);\n            *dst_16++ = tmp32;\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *dst++ = tmp32;\n\n            b = ref[5];\n            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);\n            *dst_16++ = tmp32;\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *dst++ = tmp32;\n\n            c = ref[6];\n            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);\n            *dst_16++ = 
tmp32;\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *dst++ = tmp32;\n\n            ref += 4;\n        }\n        /* do the 17th column here */\n        d = ref[3];\n        tmp32 =  e + d - 5 * (f + c) + 20 * (a + b);\n        *dst_16 = tmp32;\n        tmp32 = (tmp32 + 16) >> 5;\n        CLIP_RESULT(tmp32)\n        *dst = tmp32;\n\n        dst += 8;  /* stride for dst is 24 */\n        dst_16 += 2; /* stride for tmp_horz is 18 */\n        ref += 8;  /* stride for ref is 24 */\n    }\n\n\n    /* Do middle point filtering*/\n    src_16 = tmp_horz; /* 17 x 22 */\n    dst = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE; /* 12th array 17x17*/\n    dst -= 24; // offset\n    for (i = 0; i < 17; i++)\n    {\n        for (j = 16; j > 0; j -= 4)\n        {\n            a = *src_16;\n            b = *(src_16 += 18);\n            c = *(src_16 += 18);\n            d = *(src_16 += 18);\n            e = *(src_16 += 18);\n            f = *(src_16 += 18);\n\n            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);\n            tmp32 = (tmp32 + 512) >> 10;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;\n\n            a = *(src_16 += 18);\n            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);\n            tmp32 = (tmp32 + 512) >> 10;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;\n\n            b = *(src_16 += 18);\n            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);\n            tmp32 = (tmp32 + 512) >> 10;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;\n\n            c = *(src_16 += 18);\n            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);\n            tmp32 = (tmp32 + 512) >> 10;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;\n\n            src_16 -= (18 << 2);\n        }\n\n        d = src_16[90]; // 18*5\n        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);\n        tmp32 = (tmp32 + 512) >> 10;\n        CLIP_RESULT(tmp32)\n        dst[24] = tmp32;\n\n    
    src_16 -= ((18 << 4) - 1);\n        dst -= ((24 << 4) - 1);\n    }\n\n    /* do vertical interpolation */\n    ref = subpel_pred + 2;\n    dst = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE; /* 10th array 18x17 */\n    dst -= 24; // offset\n\n    for (i = 2; i > 0; i--)\n    {\n        for (j = 16; j > 0; j -= 4)\n        {\n            a = *ref;\n            b = *(ref += 24);\n            c = *(ref += 24);\n            d = *(ref += 24);\n            e = *(ref += 24);\n            f = *(ref += 24);\n\n            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            a = *(ref += 24);\n            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            b = *(ref += 24);\n            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            c = *(ref += 24);\n            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            ref -= (24 << 2);\n        }\n\n        d = ref[120]; // 24*5\n        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);\n        tmp32 = (tmp32 + 16) >> 5;\n        CLIP_RESULT(tmp32)\n        dst[24] = tmp32;  // 10th\n\n        dst -= ((24 << 4) - 1);\n        ref -= ((24 << 4) - 1);\n    }\n\n    // note that using SIMD here doesn't help much, the cycle almost stays the same\n    // one can just use the above code and change the for(i=2 to for(i=18\n    for (i = 16; i > 0; i -= 4)\n    {\n        msk = 0;\n        for (j = 17; j > 0; j--)\n        {\n            a = *((uint32*)ref); /* load 4 bytes */\n            b = (a >> 8) & 0xFF00FF; /* second and fourth byte */\n         
   a &= 0xFF00FF;\n\n            c = *((uint32*)(ref + 120));\n            d = (c >> 8) & 0xFF00FF;\n            c &= 0xFF00FF;\n\n            a += c;\n            b += d;\n\n            e = *((uint32*)(ref + 72)); /* e, f */\n            f = (e >> 8) & 0xFF00FF;\n            e &= 0xFF00FF;\n\n            c = *((uint32*)(ref + 48)); /* c, d */\n            d = (c >> 8) & 0xFF00FF;\n            c &= 0xFF00FF;\n\n            c += e;\n            d += f;\n\n            a += 20 * c;\n            b += 20 * d;\n            a += 0x100010;\n            b += 0x100010;\n\n            e = *((uint32*)(ref += 24)); /* e, f */\n            f = (e >> 8) & 0xFF00FF;\n            e &= 0xFF00FF;\n\n            c = *((uint32*)(ref + 72)); /* c, d */\n            d = (c >> 8) & 0xFF00FF;\n            c &= 0xFF00FF;\n\n            c += e;\n            d += f;\n\n            a -= 5 * c;\n            b -= 5 * d;\n\n            c = a << 16;\n            d = b << 16;\n            CLIP_UPPER16(a)\n            CLIP_UPPER16(c)\n            CLIP_UPPER16(b)\n            CLIP_UPPER16(d)\n\n            a |= (c >> 16);\n            b |= (d >> 16);\n            //  a>>=5;\n            //  b>>=5;\n            /* clip */\n            //  msk |= b;  msk|=a;\n            //  a &= 0xFF00FF;\n            //  b &= 0xFF00FF;\n            a |= (b << 8);  /* pack it back */\n\n            *((uint16*)(dst += 24)) = a & 0xFFFF; //dst is not word-aligned.\n            *((uint16*)(dst + 2)) = a >> 16;\n\n        }\n        dst -= 404; // 24*17-4\n        ref -= 404;\n        /*      if(msk & 0xFF00FF00) // need clipping\n                {\n                    VertInterpWClip(dst,ref); // re-do 4 column with clip\n                }*/\n    }\n\n    return ;\n}\n\nvoid VertInterpWClip(uint8 *dst, uint8 *ref)\n{\n    int i, j;\n    int a, b, c, d, e, f;\n    int32 tmp32;\n\n    dst -= 4;\n    ref -= 4;\n\n    for (i = 4; i > 0; i--)\n    {\n        for (j = 16; j > 0; j -= 4)\n        {\n            a = *ref;\n      
      b = *(ref += 24);\n            c = *(ref += 24);\n            d = *(ref += 24);\n            e = *(ref += 24);\n            f = *(ref += 24);\n\n            tmp32 = a + f - 5 * (b + e) + 20 * (c + d);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            a = *(ref += 24);\n            tmp32 = b + a - 5 * (c + f) + 20 * (d + e);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            b = *(ref += 24);\n            tmp32 = c + b - 5 * (d + a) + 20 * (e + f);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            c = *(ref += 24);\n            tmp32 = d + c - 5 * (e + b) + 20 * (f + a);\n            tmp32 = (tmp32 + 16) >> 5;\n            CLIP_RESULT(tmp32)\n            *(dst += 24) = tmp32;  // 10th\n\n            ref -= (24 << 2);\n        }\n\n        d = ref[120]; // 24*5\n        tmp32 = e + d - 5 * (f + c) + 20 * (a + b);\n        tmp32 = (tmp32 + 16) >> 5;\n        CLIP_RESULT(tmp32)\n        dst[24] = tmp32;  // 10th\n\n        dst -= ((24 << 4) - 1);\n        ref -= ((24 << 4) - 1);\n    }\n\n    return ;\n}\n\n\nvoid GenerateQuartPelPred(uint8 **bilin_base, uint8 *qpel_cand, int hpel_pos)\n{\n    // for even value of hpel_pos, start with pattern 1, otherwise, start with pattern 2\n    int i, j;\n\n    uint8 *c1 = qpel_cand;\n    uint8 *tl = bilin_base[0];\n    uint8 *tr = bilin_base[1];\n    uint8 *bl = bilin_base[2];\n    uint8 *br = bilin_base[3];\n    int a, b, c, d;\n    int offset = 1 - (384 * 7);\n\n    if (!(hpel_pos&1)) // diamond pattern\n    {\n        j = 16;\n        while (j--)\n        {\n            i = 16;\n            while (i--)\n            {\n                d = tr[24];\n                a = *tr++;\n                b = bl[1];\n                c = *br++;\n\n                *c1 = (c + a + 1) >> 1;\n              
  *(c1 += 384) = (b + a + 1) >> 1; /* c2 */\n                *(c1 += 384) = (b + c + 1) >> 1; /* c3 */\n                *(c1 += 384) = (b + d + 1) >> 1; /* c4 */\n\n                b = *bl++;\n\n                *(c1 += 384) = (c + d + 1) >> 1;  /* c5 */\n                *(c1 += 384) = (b + d + 1) >> 1;  /* c6 */\n                *(c1 += 384) = (b + c + 1) >> 1;  /* c7 */\n                *(c1 += 384) = (b + a + 1) >> 1;  /* c8 */\n\n                c1 += offset;\n            }\n            // advance to the next line, pitch is 24\n            tl += 8;\n            tr += 8;\n            bl += 8;\n            br += 8;\n            c1 += 8;\n        }\n    }\n    else // star pattern\n    {\n        j = 16;\n        while (j--)\n        {\n            i = 16;\n            while (i--)\n            {\n                a = *br++;\n                b = *tr++;\n                c = tl[1];\n                *c1 = (a + b + 1) >> 1;\n                b = bl[1];\n                *(c1 += 384) = (a + c + 1) >> 1; /* c2 */\n                c = tl[25];\n                *(c1 += 384) = (a + b + 1) >> 1; /* c3 */\n                b = tr[23];\n                *(c1 += 384) = (a + c + 1) >> 1; /* c4 */\n                c = tl[24];\n                *(c1 += 384) = (a + b + 1) >> 1; /* c5 */\n                b = *bl++;\n                *(c1 += 384) = (a + c + 1) >> 1; /* c6 */\n                c = *tl++;\n                *(c1 += 384) = (a + b + 1) >> 1; /* c7 */\n                *(c1 += 384) = (a + c + 1) >> 1; /* c8 */\n\n                c1 += offset;\n            }\n            // advance to the next line, pitch is 24\n            tl += 8;\n            tr += 8;\n            bl += 8;\n            br += 8;\n            c1 += 8;\n        }\n    }\n\n    return ;\n}\n\n\n/* assuming cand always has a pitch of 24 */\nint SATD_MB(uint8 *cand, uint8 *cur, int dmin)\n{\n    int cost;\n\n\n    dmin = (dmin << 16) | 24;\n    cost = AVCSAD_Macroblock_C(cand, cur, dmin, NULL);\n\n    return 
cost;\n}\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/header.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"avcenc_api.h\"\n\n/** see subclause 7.4.2.1 */\n/* no need for checking the valid range , already done in SetEncodeParam(),\nif we have to send another SPS, the ranges should be verified first before\nusers call PVAVCEncodeSPS() */\nAVCEnc_Status EncodeSPS(AVCEncObject *encvid, AVCEncBitstream *stream)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCSeqParamSet *seqParam = video->currSeqParams;\n    AVCVUIParams *vui = &(seqParam->vui_parameters);\n    int i;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n\n    //DEBUG_LOG(userData,AVC_LOGTYPE_INFO,\"EncodeSPS\",-1,-1);\n\n    status = BitstreamWriteBits(stream, 8, seqParam->profile_idc);\n    status = BitstreamWrite1Bit(stream, seqParam->constrained_set0_flag);\n    status = BitstreamWrite1Bit(stream, seqParam->constrained_set1_flag);\n    status = BitstreamWrite1Bit(stream, seqParam->constrained_set2_flag);\n    status = BitstreamWrite1Bit(stream, seqParam->constrained_set3_flag);\n    status = BitstreamWriteBits(stream, 4, 0);  /* forbidden zero bits */\n    if (status != AVCENC_SUCCESS)  /* we can check after each write also */\n    {\n        return status;\n    }\n\n    status = 
BitstreamWriteBits(stream, 8, seqParam->level_idc);\n    status = ue_v(stream, seqParam->seq_parameter_set_id);\n    status = ue_v(stream, seqParam->log2_max_frame_num_minus4);\n    status = ue_v(stream, seqParam->pic_order_cnt_type);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    if (seqParam->pic_order_cnt_type == 0)\n    {\n        status = ue_v(stream, seqParam->log2_max_pic_order_cnt_lsb_minus4);\n    }\n    else if (seqParam->pic_order_cnt_type == 1)\n    {\n        status = BitstreamWrite1Bit(stream, seqParam->delta_pic_order_always_zero_flag);\n        status = se_v(stream, seqParam->offset_for_non_ref_pic); /* upto 32 bits */\n        status = se_v(stream, seqParam->offset_for_top_to_bottom_field); /* upto 32 bits */\n        status = ue_v(stream, seqParam->num_ref_frames_in_pic_order_cnt_cycle);\n\n        for (i = 0; i < (int)(seqParam->num_ref_frames_in_pic_order_cnt_cycle); i++)\n        {\n            status = se_v(stream, seqParam->offset_for_ref_frame[i]); /* upto 32 bits */\n        }\n    }\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = ue_v(stream, seqParam->num_ref_frames);\n    status = BitstreamWrite1Bit(stream, seqParam->gaps_in_frame_num_value_allowed_flag);\n    status = ue_v(stream, seqParam->pic_width_in_mbs_minus1);\n    status = ue_v(stream, seqParam->pic_height_in_map_units_minus1);\n    status = BitstreamWrite1Bit(stream, seqParam->frame_mbs_only_flag);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n    /* if frame_mbs_only_flag is 0, then write, mb_adaptive_frame_field_frame here */\n\n    status = BitstreamWrite1Bit(stream, seqParam->direct_8x8_inference_flag);\n    status = BitstreamWrite1Bit(stream, seqParam->frame_cropping_flag);\n    if (seqParam->frame_cropping_flag)\n    {\n        status = ue_v(stream, seqParam->frame_crop_left_offset);\n        status = ue_v(stream, seqParam->frame_crop_right_offset);\n        status = 
ue_v(stream, seqParam->frame_crop_top_offset);\n        status = ue_v(stream, seqParam->frame_crop_bottom_offset);\n    }\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = BitstreamWrite1Bit(stream, seqParam->vui_parameters_present_flag);\n    if (seqParam->vui_parameters_present_flag)\n    {\n        /* not supported */\n        //return AVCENC_SPS_FAIL;\n        EncodeVUI(stream, vui);\n    }\n\n    return status;\n}\n\n\nvoid EncodeVUI(AVCEncBitstream* stream, AVCVUIParams* vui)\n{\n    int temp;\n\n    temp = vui->aspect_ratio_info_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        BitstreamWriteBits(stream, 8, vui->aspect_ratio_idc);\n        if (vui->aspect_ratio_idc == 255)\n        {\n            BitstreamWriteBits(stream, 16, vui->sar_width);\n            BitstreamWriteBits(stream, 16, vui->sar_height);\n        }\n    }\n    temp = vui->overscan_info_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        BitstreamWrite1Bit(stream, vui->overscan_appropriate_flag);\n    }\n    temp = vui->video_signal_type_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        BitstreamWriteBits(stream, 3, vui->video_format);\n        BitstreamWrite1Bit(stream, vui->video_full_range_flag);\n        temp = vui->colour_description_present_flag;\n        BitstreamWrite1Bit(stream, temp);\n        if (temp)\n        {\n            BitstreamWriteBits(stream, 8, vui->colour_primaries);\n            BitstreamWriteBits(stream, 8, vui->transfer_characteristics);\n            BitstreamWriteBits(stream, 8, vui->matrix_coefficients);\n        }\n    }\n    temp = vui->chroma_location_info_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        ue_v(stream, vui->chroma_sample_loc_type_top_field);\n        ue_v(stream, vui->chroma_sample_loc_type_bottom_field);\n    }\n\n    temp = vui->timing_info_present_flag;\n    
BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        BitstreamWriteBits(stream, 32, vui->num_units_in_tick);\n        BitstreamWriteBits(stream, 32, vui->time_scale);\n        BitstreamWrite1Bit(stream, vui->fixed_frame_rate_flag);\n    }\n\n    temp = vui->nal_hrd_parameters_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        EncodeHRD(stream, &(vui->nal_hrd_parameters));\n    }\n    temp = vui->vcl_hrd_parameters_present_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        EncodeHRD(stream, &(vui->vcl_hrd_parameters));\n    }\n    if (vui->nal_hrd_parameters_present_flag || vui->vcl_hrd_parameters_present_flag)\n    {\n        BitstreamWrite1Bit(stream, vui->low_delay_hrd_flag);\n    }\n    BitstreamWrite1Bit(stream, vui->pic_struct_present_flag);\n    temp = vui->bitstream_restriction_flag;\n    BitstreamWrite1Bit(stream, temp);\n    if (temp)\n    {\n        BitstreamWrite1Bit(stream, vui->motion_vectors_over_pic_boundaries_flag);\n        ue_v(stream, vui->max_bytes_per_pic_denom);\n        ue_v(stream, vui->max_bits_per_mb_denom);\n        ue_v(stream, vui->log2_max_mv_length_horizontal);\n        ue_v(stream, vui->log2_max_mv_length_vertical);\n        ue_v(stream, vui->max_dec_frame_reordering);\n        ue_v(stream, vui->max_dec_frame_buffering);\n    }\n\n    return ;\n}\n\n\nvoid EncodeHRD(AVCEncBitstream* stream, AVCHRDParams* hrd)\n{\n    int i;\n\n    ue_v(stream, hrd->cpb_cnt_minus1);\n    BitstreamWriteBits(stream, 4, hrd->bit_rate_scale);\n    BitstreamWriteBits(stream, 4, hrd->cpb_size_scale);\n    for (i = 0; i <= (int)hrd->cpb_cnt_minus1; i++)\n    {\n        ue_v(stream, hrd->bit_rate_value_minus1[i]);\n        ue_v(stream, hrd->cpb_size_value_minus1[i]);\n        ue_v(stream, hrd->cbr_flag[i]);\n    }\n    BitstreamWriteBits(stream, 5, hrd->initial_cpb_removal_delay_length_minus1);\n    BitstreamWriteBits(stream, 5, hrd->cpb_removal_delay_length_minus1);\n    
BitstreamWriteBits(stream, 5, hrd->dpb_output_delay_length_minus1);\n    BitstreamWriteBits(stream, 5, hrd->time_offset_length);\n\n    return ;\n}\n\n\n\n/** see subclause 7.4.2.2 */\n/* no need for checking the valid range , already done in SetEncodeParam().\nIf we have to send another SPS, the ranges should be verified first before\nusers call PVAVCEncodeSPS()*/\nAVCEnc_Status EncodePPS(AVCEncObject *encvid, AVCEncBitstream *stream)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    AVCPicParamSet *picParam = video->currPicParams;\n    int i, iGroup, numBits;\n    uint temp;\n\n    status = ue_v(stream, picParam->pic_parameter_set_id);\n    status = ue_v(stream, picParam->seq_parameter_set_id);\n    status = BitstreamWrite1Bit(stream, picParam->entropy_coding_mode_flag);\n    status = BitstreamWrite1Bit(stream, picParam->pic_order_present_flag);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = ue_v(stream, picParam->num_slice_groups_minus1);\n    if (picParam->num_slice_groups_minus1 > 0)\n    {\n        status = ue_v(stream, picParam->slice_group_map_type);\n        if (picParam->slice_group_map_type == 0)\n        {\n            for (iGroup = 0; iGroup <= (int)picParam->num_slice_groups_minus1; iGroup++)\n            {\n                status = ue_v(stream, picParam->run_length_minus1[iGroup]);\n            }\n        }\n        else if (picParam->slice_group_map_type == 2)\n        {\n            for (iGroup = 0; iGroup < (int)picParam->num_slice_groups_minus1; iGroup++)\n            {\n                status = ue_v(stream, picParam->top_left[iGroup]);\n                status = ue_v(stream, picParam->bottom_right[iGroup]);\n            }\n        }\n        else if (picParam->slice_group_map_type == 3 ||\n                 picParam->slice_group_map_type == 4 ||\n                 picParam->slice_group_map_type == 5)\n        {\n            status = BitstreamWrite1Bit(stream, 
picParam->slice_group_change_direction_flag);\n            status = ue_v(stream, picParam->slice_group_change_rate_minus1);\n        }\n        else /*if(picParam->slice_group_map_type == 6)*/\n        {\n            status = ue_v(stream, picParam->pic_size_in_map_units_minus1);\n\n            numBits = 0;/* ceil(log2(num_slice_groups_minus1+1)) bits */\n            i = picParam->num_slice_groups_minus1;\n            while (i > 0)\n            {\n                numBits++;\n                i >>= 1;\n            }\n\n            for (i = 0; i <= (int)picParam->pic_size_in_map_units_minus1; i++)\n            {\n                status = BitstreamWriteBits(stream, numBits, picParam->slice_group_id[i]);\n            }\n        }\n    }\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = ue_v(stream, picParam->num_ref_idx_l0_active_minus1);\n    status = ue_v(stream, picParam->num_ref_idx_l1_active_minus1);\n    status = BitstreamWrite1Bit(stream, picParam->weighted_pred_flag);\n    status = BitstreamWriteBits(stream, 2, picParam->weighted_bipred_idc);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = se_v(stream, picParam->pic_init_qp_minus26);\n    status = se_v(stream, picParam->pic_init_qs_minus26);\n    status = se_v(stream, picParam->chroma_qp_index_offset);\n\n    temp = picParam->deblocking_filter_control_present_flag << 2;\n    temp |= (picParam->constrained_intra_pred_flag << 1);\n    temp |= picParam->redundant_pic_cnt_present_flag;\n\n    status = BitstreamWriteBits(stream, 3, temp);\n\n    return status;\n}\n\n/** see subclause 7.4.3 */\nAVCEnc_Status EncodeSliceHeader(AVCEncObject *encvid, AVCEncBitstream *stream)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCPicParamSet *currPPS = video->currPicParams;\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    int slice_type, temp, 
i;\n    int num_bits;\n\n    num_bits = (stream->write_pos << 3) - stream->bit_left;\n\n    status = ue_v(stream, sliceHdr->first_mb_in_slice);\n\n    slice_type = video->slice_type;\n\n    if (video->mbNum == 0) /* first mb in frame */\n    {\n        status = ue_v(stream, sliceHdr->slice_type);\n    }\n    else\n    {\n        status = ue_v(stream, slice_type);\n    }\n\n    status = ue_v(stream, sliceHdr->pic_parameter_set_id);\n\n    status = BitstreamWriteBits(stream, currSPS->log2_max_frame_num_minus4 + 4, sliceHdr->frame_num);\n\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n    /* if frame_mbs_only_flag is 0, encode field_pic_flag, bottom_field_flag here */\n\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        status = ue_v(stream, sliceHdr->idr_pic_id);\n    }\n\n    if (currSPS->pic_order_cnt_type == 0)\n    {\n        status = BitstreamWriteBits(stream, currSPS->log2_max_pic_order_cnt_lsb_minus4 + 4,\n                                    sliceHdr->pic_order_cnt_lsb);\n\n        if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)\n        {\n            status = se_v(stream, sliceHdr->delta_pic_order_cnt_bottom); /* 32 bits */\n        }\n    }\n    if (currSPS->pic_order_cnt_type == 1 && !currSPS->delta_pic_order_always_zero_flag)\n    {\n        status = se_v(stream, sliceHdr->delta_pic_order_cnt[0]);    /* 32 bits */\n        if (currPPS->pic_order_present_flag && !sliceHdr->field_pic_flag)\n        {\n            status = se_v(stream, sliceHdr->delta_pic_order_cnt[1]); /* 32 bits */\n        }\n    }\n\n    if (currPPS->redundant_pic_cnt_present_flag)\n    {\n        status = ue_v(stream, sliceHdr->redundant_pic_cnt);\n    }\n\n    if (slice_type == AVC_B_SLICE)\n    {\n        status = BitstreamWrite1Bit(stream, sliceHdr->direct_spatial_mv_pred_flag);\n    }\n\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    if (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE 
|| slice_type == AVC_B_SLICE)\n    {\n        status = BitstreamWrite1Bit(stream, sliceHdr->num_ref_idx_active_override_flag);\n        if (sliceHdr->num_ref_idx_active_override_flag)\n        {\n            /* we shouldn't enter this part at all */\n            status = ue_v(stream, sliceHdr->num_ref_idx_l0_active_minus1);\n            if (slice_type == AVC_B_SLICE)\n            {\n                status = ue_v(stream, sliceHdr->num_ref_idx_l1_active_minus1);\n            }\n        }\n    }\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    /* ref_pic_list_reordering() */\n    status = ref_pic_list_reordering(video, stream, sliceHdr, slice_type);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||\n            (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))\n    {\n        //      pred_weight_table(); // not supported !!\n        return AVCENC_PRED_WEIGHT_TAB_FAIL;\n    }\n\n    if (video->nal_ref_idc != 0)\n    {\n        status = dec_ref_pic_marking(video, stream, sliceHdr);\n        if (status != AVCENC_SUCCESS)\n        {\n            return status;\n        }\n    }\n\n    if (currPPS->entropy_coding_mode_flag && slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)\n    {\n        return AVCENC_CABAC_FAIL;\n        /*      ue_v(stream,&(sliceHdr->cabac_init_idc));\n                if(sliceHdr->cabac_init_idc > 2){\n                    // not supported !!!!\n                }*/\n    }\n\n    status = se_v(stream, sliceHdr->slice_qp_delta);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    if (slice_type == AVC_SP_SLICE || slice_type == AVC_SI_SLICE)\n    {\n        if (slice_type == AVC_SP_SLICE)\n        {\n            status = BitstreamWrite1Bit(stream, sliceHdr->sp_for_switch_flag);\n            /* if sp_for_switch_flag is 0, P macroblocks in SP slice 
is decoded using\n            SP decoding process for non-switching pictures in 8.6.1 */\n            /* else, P macroblocks in SP slice is decoded using SP and SI decoding\n            process for switching picture in 8.6.2 */\n        }\n        status = se_v(stream, sliceHdr->slice_qs_delta);\n        if (status != AVCENC_SUCCESS)\n        {\n            return status;\n        }\n    }\n\n    if (currPPS->deblocking_filter_control_present_flag)\n    {\n\n        status = ue_v(stream, sliceHdr->disable_deblocking_filter_idc);\n\n        if (sliceHdr->disable_deblocking_filter_idc != 1)\n        {\n            status = se_v(stream, sliceHdr->slice_alpha_c0_offset_div2);\n\n            status = se_v(stream, sliceHdr->slice_beta_offset_div_2);\n        }\n        if (status != AVCENC_SUCCESS)\n        {\n            return status;\n        }\n    }\n\n    if (currPPS->num_slice_groups_minus1 > 0 && currPPS->slice_group_map_type >= 3\n            && currPPS->slice_group_map_type <= 5)\n    {\n        /* Ceil(Log2(PicSizeInMapUnits/(float)SliceGroupChangeRate + 1)) */\n        temp = video->PicSizeInMapUnits / video->SliceGroupChangeRate;\n        if (video->PicSizeInMapUnits % video->SliceGroupChangeRate)\n        {\n            temp++;\n        }\n        i = 0;\n        while (temp > 1)\n        {\n            temp >>= 1;\n            i++;\n        }\n\n        BitstreamWriteBits(stream, i, sliceHdr->slice_group_change_cycle);\n    }\n\n\n    encvid->rateCtrl->NumberofHeaderBits += (stream->write_pos << 3) - stream->bit_left - num_bits;\n\n    return AVCENC_SUCCESS;\n}\n\n/** see subclause 7.4.3.1 */\nAVCEnc_Status ref_pic_list_reordering(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr, int slice_type)\n{\n    (void)(video);\n    int i;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n\n    if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)\n    {\n        status = BitstreamWrite1Bit(stream, 
sliceHdr->ref_pic_list_reordering_flag_l0);\n        if (sliceHdr->ref_pic_list_reordering_flag_l0)\n        {\n            i = 0;\n            do\n            {\n                status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l0[i]);\n                if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 ||\n                        sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1)\n                {\n                    status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l0[i]);\n                    /* this check should be in InitSlice(), if we ever use it */\n                    /*if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 0 &&\n                        sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -1)\n                    {\n                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range\n                    }\n                    if(sliceHdr->reordering_of_pic_nums_idc_l0[i] == 1 &&\n                        sliceHdr->abs_diff_pic_num_minus1_l0[i] > video->MaxPicNum/2 -2)\n                    {\n                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range\n                    }*/\n                }\n                else if (sliceHdr->reordering_of_pic_nums_idc_l0[i] == 2)\n                {\n                    status = ue_v(stream, sliceHdr->long_term_pic_num_l0[i]);\n                }\n                i++;\n            }\n            while (sliceHdr->reordering_of_pic_nums_idc_l0[i] != 3\n                    && i <= (int)sliceHdr->num_ref_idx_l0_active_minus1 + 1) ;\n        }\n    }\n    if (slice_type == AVC_B_SLICE)\n    {\n        status = BitstreamWrite1Bit(stream, sliceHdr->ref_pic_list_reordering_flag_l1);\n        if (sliceHdr->ref_pic_list_reordering_flag_l1)\n        {\n            i = 0;\n            do\n            {\n                status = ue_v(stream, sliceHdr->reordering_of_pic_nums_idc_l1[i]);\n                if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 ||\n                     
   sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1)\n                {\n                    status = ue_v(stream, sliceHdr->abs_diff_pic_num_minus1_l1[i]);\n                    /* This check should be in InitSlice() if we ever use it\n                    if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 0 &&\n                        sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -1)\n                    {\n                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range\n                    }\n                    if(sliceHdr->reordering_of_pic_nums_idc_l1[i] == 1 &&\n                        sliceHdr->abs_diff_pic_num_minus1_l1[i] > video->MaxPicNum/2 -2)\n                    {\n                        return AVCENC_REF_PIC_REORDER_FAIL; // out of range\n                    }*/\n                }\n                else if (sliceHdr->reordering_of_pic_nums_idc_l1[i] == 2)\n                {\n                    status = ue_v(stream, sliceHdr->long_term_pic_num_l1[i]);\n                }\n                i++;\n            }\n            while (sliceHdr->reordering_of_pic_nums_idc_l1[i] != 3\n                    && i <= (int)sliceHdr->num_ref_idx_l1_active_minus1 + 1) ;\n        }\n    }\n\n    return status;\n}\n\n/** see subclause 7.4.3.3 */\nAVCEnc_Status dec_ref_pic_marking(AVCCommonObj *video, AVCEncBitstream *stream, AVCSliceHeader *sliceHdr)\n{\n    int i;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        status = BitstreamWrite1Bit(stream, sliceHdr->no_output_of_prior_pics_flag);\n        status = BitstreamWrite1Bit(stream, sliceHdr->long_term_reference_flag);\n        if (sliceHdr->long_term_reference_flag == 0) /* used for short-term */\n        {\n            video->MaxLongTermFrameIdx = -1; /* no long-term frame indx */\n        }\n        else /* used for long-term */\n        {\n            video->MaxLongTermFrameIdx = 0;\n            video->LongTermFrameIdx = 0;\n        
}\n    }\n    else\n    {\n        status = BitstreamWrite1Bit(stream, sliceHdr->adaptive_ref_pic_marking_mode_flag); /* default to zero */\n        if (sliceHdr->adaptive_ref_pic_marking_mode_flag)\n        {\n            i = 0;\n            do\n            {\n                status = ue_v(stream, sliceHdr->memory_management_control_operation[i]);\n                if (sliceHdr->memory_management_control_operation[i] == 1 ||\n                        sliceHdr->memory_management_control_operation[i] == 3)\n                {\n                    status = ue_v(stream, sliceHdr->difference_of_pic_nums_minus1[i]);\n                }\n                if (sliceHdr->memory_management_control_operation[i] == 2)\n                {\n                    status = ue_v(stream, sliceHdr->long_term_pic_num[i]);\n                }\n                if (sliceHdr->memory_management_control_operation[i] == 3 ||\n                        sliceHdr->memory_management_control_operation[i] == 6)\n                {\n                    status = ue_v(stream, sliceHdr->long_term_frame_idx[i]);\n                }\n                if (sliceHdr->memory_management_control_operation[i] == 4)\n                {\n                    status = ue_v(stream, sliceHdr->max_long_term_frame_idx_plus1[i]);\n                }\n                i++;\n            }\n            while (sliceHdr->memory_management_control_operation[i] != 0 && i < MAX_DEC_REF_PIC_MARKING);\n            if (i >= MAX_DEC_REF_PIC_MARKING && sliceHdr->memory_management_control_operation[i] != 0)\n            {\n                return AVCENC_DEC_REF_PIC_MARK_FAIL; /* we're screwed!!, not enough memory */\n            }\n        }\n    }\n\n    return status;\n}\n\n/* see subclause 8.2.1 Decoding process for picture order count.\nSee also PostPOC() for initialization of some variables. 
*/\nAVCEnc_Status InitPOC(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCFrameIO  *currInput = encvid->currInput;\n    int i;\n\n    switch (currSPS->pic_order_cnt_type)\n    {\n        case 0: /* POC MODE 0 , subclause 8.2.1.1 */\n            /* encoding part */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                encvid->dispOrdPOCRef = currInput->disp_order;\n            }\n            while (currInput->disp_order < encvid->dispOrdPOCRef)\n            {\n                encvid->dispOrdPOCRef -= video->MaxPicOrderCntLsb;\n            }\n            sliceHdr->pic_order_cnt_lsb = currInput->disp_order - encvid->dispOrdPOCRef;\n            while (sliceHdr->pic_order_cnt_lsb >= video->MaxPicOrderCntLsb)\n            {\n                sliceHdr->pic_order_cnt_lsb -= video->MaxPicOrderCntLsb;\n            }\n            /* decoding part */\n            /* Calculate the MSBs of current picture */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->prevPicOrderCntMsb = 0;\n                video->prevPicOrderCntLsb = 0;\n            }\n            if (sliceHdr->pic_order_cnt_lsb  <  video->prevPicOrderCntLsb  &&\n                    (video->prevPicOrderCntLsb - sliceHdr->pic_order_cnt_lsb)  >= (video->MaxPicOrderCntLsb / 2))\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb + video->MaxPicOrderCntLsb;\n            else if (sliceHdr->pic_order_cnt_lsb  >  video->prevPicOrderCntLsb  &&\n                     (sliceHdr->pic_order_cnt_lsb - video->prevPicOrderCntLsb)  > (video->MaxPicOrderCntLsb / 2))\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb - video->MaxPicOrderCntLsb;\n            else\n                video->PicOrderCntMsb = video->prevPicOrderCntMsb;\n\n            /* JVT-I010 page 81 is different from JM7.3 */\n           
 if (!sliceHdr->field_pic_flag || !sliceHdr->bottom_field_flag)\n            {\n                video->PicOrderCnt = video->TopFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;\n            }\n\n            if (!sliceHdr->field_pic_flag)\n            {\n                video->BottomFieldOrderCnt = video->TopFieldOrderCnt + sliceHdr->delta_pic_order_cnt_bottom;\n            }\n            else if (sliceHdr->bottom_field_flag)\n            {\n                video->PicOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCntMsb + sliceHdr->pic_order_cnt_lsb;\n            }\n\n            if (!sliceHdr->field_pic_flag)\n            {\n                video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);\n            }\n\n            if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)\n            {\n                sliceHdr->delta_pic_order_cnt_bottom = 0; /* defaulted to zero */\n            }\n\n            break;\n        case 1: /* POC MODE 1, subclause 8.2.1.2 */\n            /* calculate FrameNumOffset */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                encvid->dispOrdPOCRef = currInput->disp_order;  /* reset the reference point */\n                video->prevFrameNumOffset = 0;\n                video->FrameNumOffset = 0;\n            }\n            else if (video->prevFrameNum > sliceHdr->frame_num)\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;\n            }\n            else\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset;\n            }\n            /* calculate absFrameNum */\n            if (currSPS->num_ref_frames_in_pic_order_cnt_cycle)\n            {\n                video->absFrameNum = video->FrameNumOffset + sliceHdr->frame_num;\n            }\n            else\n            {\n                video->absFrameNum = 0;\n            }\n\n            
if (video->absFrameNum > 0 && video->nal_ref_idc == 0)\n            {\n                video->absFrameNum--;\n            }\n\n            /* derive picOrderCntCycleCnt and frameNumInPicOrderCntCycle */\n            if (video->absFrameNum > 0)\n            {\n                video->picOrderCntCycleCnt = (video->absFrameNum - 1) / currSPS->num_ref_frames_in_pic_order_cnt_cycle;\n                video->frameNumInPicOrderCntCycle = (video->absFrameNum - 1) % currSPS->num_ref_frames_in_pic_order_cnt_cycle;\n            }\n            /* derive expectedDeltaPerPicOrderCntCycle, this value can be computed up front. */\n            video->expectedDeltaPerPicOrderCntCycle = 0;\n            for (i = 0; i < (int)currSPS->num_ref_frames_in_pic_order_cnt_cycle; i++)\n            {\n                video->expectedDeltaPerPicOrderCntCycle += currSPS->offset_for_ref_frame[i];\n            }\n            /* derive expectedPicOrderCnt */\n            if (video->absFrameNum)\n            {\n                video->expectedPicOrderCnt = video->picOrderCntCycleCnt * video->expectedDeltaPerPicOrderCntCycle;\n                for (i = 0; i <= video->frameNumInPicOrderCntCycle; i++)\n                {\n                    video->expectedPicOrderCnt += currSPS->offset_for_ref_frame[i];\n                }\n            }\n            else\n            {\n                video->expectedPicOrderCnt = 0;\n            }\n\n            if (video->nal_ref_idc == 0)\n            {\n                video->expectedPicOrderCnt += currSPS->offset_for_non_ref_pic;\n            }\n            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */\n            /* encoding part */\n            if (!currSPS->delta_pic_order_always_zero_flag)\n            {\n                sliceHdr->delta_pic_order_cnt[0] = currInput->disp_order - encvid->dispOrdPOCRef - video->expectedPicOrderCnt;\n\n                if (video->currPicParams->pic_order_present_flag && !sliceHdr->field_pic_flag)\n                {\n            
        sliceHdr->delta_pic_order_cnt[1] = sliceHdr->delta_pic_order_cnt[0]; /* should be calculated from currInput->bottom_field->disp_order */\n                }\n                else\n                {\n                    sliceHdr->delta_pic_order_cnt[1] = 0;\n                }\n            }\n            else\n            {\n                sliceHdr->delta_pic_order_cnt[0] = sliceHdr->delta_pic_order_cnt[1] = 0;\n            }\n\n            if (sliceHdr->field_pic_flag == 0)\n            {\n                video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];\n                video->BottomFieldOrderCnt = video->TopFieldOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[1];\n\n                video->PicOrderCnt = AVC_MIN(video->TopFieldOrderCnt, video->BottomFieldOrderCnt);\n            }\n            else if (sliceHdr->bottom_field_flag == 0)\n            {\n                video->TopFieldOrderCnt = video->expectedPicOrderCnt + sliceHdr->delta_pic_order_cnt[0];\n                video->PicOrderCnt = video->TopFieldOrderCnt;\n            }\n            else\n            {\n                video->BottomFieldOrderCnt = video->expectedPicOrderCnt + currSPS->offset_for_top_to_bottom_field + sliceHdr->delta_pic_order_cnt[0];\n                video->PicOrderCnt = video->BottomFieldOrderCnt;\n            }\n            break;\n\n\n        case 2: /* POC MODE 2, subclause 8.2.1.3 */\n            /* decoding order must be the same as display order */\n            /* we don't check for that. The decoder will just output in decoding order. 
*/\n            /* Check for 2 consecutive non-reference frame */\n            if (video->nal_ref_idc == 0)\n            {\n                if (encvid->dispOrdPOCRef == 1)\n                {\n                    return AVCENC_CONSECUTIVE_NONREF;\n                }\n                encvid->dispOrdPOCRef = 1;  /* act as a flag for non ref */\n            }\n            else\n            {\n                encvid->dispOrdPOCRef = 0;\n            }\n\n\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->FrameNumOffset = 0;\n            }\n            else if (video->prevFrameNum > sliceHdr->frame_num)\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset + video->MaxFrameNum;\n            }\n            else\n            {\n                video->FrameNumOffset = video->prevFrameNumOffset;\n            }\n            /* derive tempPicOrderCnt, we just use PicOrderCnt */\n            if (video->nal_unit_type == AVC_NALTYPE_IDR)\n            {\n                video->PicOrderCnt = 0;\n            }\n            else if (video->nal_ref_idc == 0)\n            {\n                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num) - 1;\n            }\n            else\n            {\n                video->PicOrderCnt = 2 * (video->FrameNumOffset + sliceHdr->frame_num);\n            }\n            /* derive TopFieldOrderCnt and BottomFieldOrderCnt */\n            if (sliceHdr->field_pic_flag == 0)\n            {\n                video->TopFieldOrderCnt = video->BottomFieldOrderCnt = video->PicOrderCnt;\n            }\n            else if (sliceHdr->bottom_field_flag)\n            {\n                video->BottomFieldOrderCnt = video->PicOrderCnt;\n            }\n            else\n            {\n                video->TopFieldOrderCnt = video->PicOrderCnt;\n            }\n            break;\n        default:\n            return AVCENC_POC_FAIL;\n    }\n\n    return 
AVCENC_SUCCESS;\n}\n\n/** see subclause 8.2.1 */\nAVCEnc_Status PostPOC(AVCCommonObj *video)\n{\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n\n    video->prevFrameNum = sliceHdr->frame_num;\n\n    switch (currSPS->pic_order_cnt_type)\n    {\n        case 0: /* subclause 8.2.1.1 */\n            if (video->mem_mgr_ctrl_eq_5)\n            {\n                video->prevPicOrderCntMsb = 0;\n                video->prevPicOrderCntLsb = video->TopFieldOrderCnt;\n            }\n            else\n            {\n                video->prevPicOrderCntMsb = video->PicOrderCntMsb;\n                video->prevPicOrderCntLsb = sliceHdr->pic_order_cnt_lsb;\n            }\n            break;\n        case 1:  /* subclause 8.2.1.2 and 8.2.1.3 */\n        case 2:\n            if (video->mem_mgr_ctrl_eq_5)\n            {\n                video->prevFrameNumOffset = 0;\n            }\n            else\n            {\n                video->prevFrameNumOffset = video->FrameNumOffset;\n            }\n            break;\n    }\n\n    return AVCENC_SUCCESS;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/init.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"avcenc_api.h\"\n#include \"oscl_string.h\"\n\n#define LOG2_MAX_FRAME_NUM_MINUS4   12   /* 12 default */\n#define SLICE_GROUP_CHANGE_CYCLE    1    /* default */\n\n/* initialized variables to be used in SPS*/\nAVCEnc_Status  SetEncodeParam(AVCHandle* avcHandle, AVCEncParams* encParam,\n                              void* extSPS, void* extPPS)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCSeqParamSet *seqParam = video->currSeqParams;\n    AVCPicParamSet *picParam = video->currPicParams;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCEnc_Status status;\n    void *userData = avcHandle->userData;\n    int ii, maxFrameNum;\n\n    AVCSeqParamSet* extS = NULL;\n    AVCPicParamSet* extP = NULL;\n\n    if (extSPS) extS = (AVCSeqParamSet*) extSPS;\n    if (extPPS) extP = (AVCPicParamSet*) extPPS;\n\n    /* This part sets the default values of the encoding options this\n    library supports in seqParam, picParam and sliceHdr structures and\n    also copy the values from the encParam into the above 3 structures.\n\n    Some parameters will 
be assigned later when we encode SPS or PPS such as\n    the seq_parameter_id or pic_parameter_id. Also some of the slice parameters\n    have to be re-assigned per slice basis such as frame_num, slice_type,\n    first_mb_in_slice, pic_order_cnt_lsb, slice_qp_delta, slice_group_change_cycle */\n\n    /* profile_idc, constrained_setx_flag and level_idc is set by VerifyProfile(),\n    and VerifyLevel() functions later. */\n\n    encvid->fullsearch_enable = encParam->fullsearch;\n\n    encvid->outOfBandParamSet = ((encParam->out_of_band_param_set == AVC_ON) ? TRUE : FALSE);\n\n    /* parameters derived from the the encParam that are used in SPS */\n    if (extS)\n    {\n        video->MaxPicOrderCntLsb =  1 << (extS->log2_max_pic_order_cnt_lsb_minus4 + 4);\n        video->PicWidthInMbs = extS->pic_width_in_mbs_minus1 + 1;\n        video->PicHeightInMapUnits = extS->pic_height_in_map_units_minus1 + 1 ;\n        video->FrameHeightInMbs = (2 - extS->frame_mbs_only_flag) * video->PicHeightInMapUnits ;\n    }\n    else\n    {\n        video->MaxPicOrderCntLsb =  1 << (encParam->log2_max_poc_lsb_minus_4 + 4);\n        video->PicWidthInMbs = (encParam->width + 15) >> 4; /* round it to multiple of 16 */\n        video->FrameHeightInMbs = (encParam->height + 15) >> 4; /* round it to multiple of 16 */\n        video->PicHeightInMapUnits = video->FrameHeightInMbs;\n    }\n\n    video->PicWidthInSamplesL = video->PicWidthInMbs * 16 ;\n    if (video->PicWidthInSamplesL + 32 > 0xFFFF)\n    {\n        return AVCENC_NOT_SUPPORTED; // we use 2-bytes for pitch\n    }\n\n    video->PicWidthInSamplesC = video->PicWidthInMbs * 8 ;\n    video->PicHeightInMbs = video->FrameHeightInMbs;\n    video->PicSizeInMapUnits = video->PicWidthInMbs * video->PicHeightInMapUnits ;\n    video->PicHeightInSamplesL = video->PicHeightInMbs * 16;\n    video->PicHeightInSamplesC = video->PicHeightInMbs * 8;\n    video->PicSizeInMbs = video->PicWidthInMbs * video->PicHeightInMbs;\n\n    if (!extS && !extP)\n   
 {\n        maxFrameNum = (encParam->idr_period == 0) ? (1 << 16) : encParam->idr_period;\n        ii = 0;\n        while (maxFrameNum > 0)\n        {\n            ii++;\n            maxFrameNum >>= 1;\n        }\n        if (ii < 4) ii = 4;\n        else if (ii > 16) ii = 16;\n\n        seqParam->log2_max_frame_num_minus4 = ii - 4;//LOG2_MAX_FRAME_NUM_MINUS4; /* default */\n\n        video->MaxFrameNum = 1 << ii; //(LOG2_MAX_FRAME_NUM_MINUS4 + 4); /* default */\n        video->MaxPicNum = video->MaxFrameNum;\n\n        /************* set the SPS *******************/\n        seqParam->seq_parameter_set_id = 0; /* start with zero */\n        /* POC */\n        seqParam->pic_order_cnt_type = encParam->poc_type; /* POC type */\n        if (encParam->poc_type == 0)\n        {\n            if (/*encParam->log2_max_poc_lsb_minus_4<0 || (no need, it's unsigned)*/\n                encParam->log2_max_poc_lsb_minus_4 > 12)\n            {\n                return AVCENC_INVALID_POC_LSB;\n            }\n            seqParam->log2_max_pic_order_cnt_lsb_minus4 = encParam->log2_max_poc_lsb_minus_4;\n        }\n        else if (encParam->poc_type == 1)\n        {\n            seqParam->delta_pic_order_always_zero_flag = encParam->delta_poc_zero_flag;\n            seqParam->offset_for_non_ref_pic = encParam->offset_poc_non_ref;\n            seqParam->offset_for_top_to_bottom_field = encParam->offset_top_bottom;\n            seqParam->num_ref_frames_in_pic_order_cnt_cycle = encParam->num_ref_in_cycle;\n            if (encParam->offset_poc_ref == NULL)\n            {\n                return AVCENC_ENCPARAM_MEM_FAIL;\n            }\n            for (ii = 0; ii < encParam->num_ref_frame; ii++)\n            {\n                seqParam->offset_for_ref_frame[ii] = encParam->offset_poc_ref[ii];\n            }\n        }\n        /* number of reference frame */\n        if (encParam->num_ref_frame > 16 || encParam->num_ref_frame < 0)\n        {\n            return AVCENC_INVALID_NUM_REF;\n  
      }\n        seqParam->num_ref_frames = encParam->num_ref_frame; /* num reference frame range 0...16*/\n        seqParam->gaps_in_frame_num_value_allowed_flag = FALSE;\n        seqParam->pic_width_in_mbs_minus1 = video->PicWidthInMbs - 1;\n        seqParam->pic_height_in_map_units_minus1 = video->PicHeightInMapUnits - 1;\n        seqParam->frame_mbs_only_flag = TRUE;\n        seqParam->mb_adaptive_frame_field_flag = FALSE;\n        seqParam->direct_8x8_inference_flag = TRUE; /* default */\n        seqParam->frame_cropping_flag = FALSE;\n        seqParam->frame_crop_bottom_offset = 0;\n        seqParam->frame_crop_left_offset = 0;\n        seqParam->frame_crop_right_offset = 0;\n        seqParam->frame_crop_top_offset = 0;\n        seqParam->vui_parameters_present_flag = FALSE; /* default */\n    }\n    else if (extS) // use external SPS and PPS\n    {\n        seqParam->seq_parameter_set_id = extS->seq_parameter_set_id;\n        seqParam->log2_max_frame_num_minus4 = extS->log2_max_frame_num_minus4;\n        video->MaxFrameNum = 1 << (extS->log2_max_frame_num_minus4 + 4);\n        video->MaxPicNum = video->MaxFrameNum;\n        if (encParam->idr_period > (int)(video->MaxFrameNum) || (encParam->idr_period == -1))\n        {\n            encParam->idr_period = (int)video->MaxFrameNum;\n        }\n\n        seqParam->pic_order_cnt_type = extS->pic_order_cnt_type;\n        if (seqParam->pic_order_cnt_type == 0)\n        {\n            if (/*extS->log2_max_pic_order_cnt_lsb_minus4<0 || (no need it's unsigned)*/\n                extS->log2_max_pic_order_cnt_lsb_minus4 > 12)\n            {\n                return AVCENC_INVALID_POC_LSB;\n            }\n            seqParam->log2_max_pic_order_cnt_lsb_minus4 = extS->log2_max_pic_order_cnt_lsb_minus4;\n        }\n        else if (seqParam->pic_order_cnt_type == 1)\n        {\n            seqParam->delta_pic_order_always_zero_flag = extS->delta_pic_order_always_zero_flag;\n            seqParam->offset_for_non_ref_pic = 
extS->offset_for_non_ref_pic;\n            seqParam->offset_for_top_to_bottom_field = extS->offset_for_top_to_bottom_field;\n            seqParam->num_ref_frames_in_pic_order_cnt_cycle = extS->num_ref_frames_in_pic_order_cnt_cycle;\n            if (extS->offset_for_ref_frame == NULL)\n            {\n                return AVCENC_ENCPARAM_MEM_FAIL;\n            }\n            for (ii = 0; ii < (int) extS->num_ref_frames; ii++)\n            {\n                seqParam->offset_for_ref_frame[ii] = extS->offset_for_ref_frame[ii];\n            }\n        }\n        /* number of reference frame */\n        if (extS->num_ref_frames > 16 /*|| extS->num_ref_frames<0 (no need, it's unsigned)*/)\n        {\n            return AVCENC_INVALID_NUM_REF;\n        }\n        seqParam->num_ref_frames = extS->num_ref_frames; /* num reference frame range 0...16*/\n        seqParam->gaps_in_frame_num_value_allowed_flag = extS->gaps_in_frame_num_value_allowed_flag;\n        seqParam->pic_width_in_mbs_minus1 = extS->pic_width_in_mbs_minus1;\n        seqParam->pic_height_in_map_units_minus1 = extS->pic_height_in_map_units_minus1;\n        seqParam->frame_mbs_only_flag = extS->frame_mbs_only_flag;\n        if (extS->frame_mbs_only_flag != TRUE)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n        seqParam->mb_adaptive_frame_field_flag = extS->mb_adaptive_frame_field_flag;\n        if (extS->mb_adaptive_frame_field_flag != FALSE)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n\n        seqParam->direct_8x8_inference_flag = extS->direct_8x8_inference_flag;\n        seqParam->frame_cropping_flag = extS->frame_cropping_flag ;\n        if (extS->frame_cropping_flag != FALSE)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n\n        seqParam->frame_crop_bottom_offset = 0;\n        seqParam->frame_crop_left_offset = 0;\n        seqParam->frame_crop_right_offset = 0;\n        seqParam->frame_crop_top_offset = 0;\n        
seqParam->vui_parameters_present_flag = extS->vui_parameters_present_flag;\n        if (extS->vui_parameters_present_flag)\n        {\n            oscl_memcpy(&(seqParam->vui_parameters), &(extS->vui_parameters), sizeof(AVCVUIParams));\n        }\n    }\n    else\n    {\n        return AVCENC_NOT_SUPPORTED;\n    }\n\n    /***************** now PPS ******************************/\n    if (!extP && !extS)\n    {\n        picParam->pic_parameter_set_id = (uint)(-1); /* start with zero */\n        picParam->seq_parameter_set_id = (uint)(-1); /* start with zero */\n        picParam->entropy_coding_mode_flag = 0; /* default to CAVLC */\n        picParam->pic_order_present_flag = 0; /* default for now, will need it for B-slice */\n        /* FMO */\n        if (encParam->num_slice_group < 1 || encParam->num_slice_group > MAX_NUM_SLICE_GROUP)\n        {\n            return AVCENC_INVALID_NUM_SLICEGROUP;\n        }\n        picParam->num_slice_groups_minus1 = encParam->num_slice_group - 1;\n\n        if (picParam->num_slice_groups_minus1 > 0)\n        {\n            picParam->slice_group_map_type = encParam->fmo_type;\n            switch (encParam->fmo_type)\n            {\n                case 0:\n                    for (ii = 0; ii <= (int)picParam->num_slice_groups_minus1; ii++)\n                    {\n                        picParam->run_length_minus1[ii] = encParam->run_length_minus1[ii];\n                    }\n                    break;\n                case 2:\n                    for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)\n                    {\n                        picParam->top_left[ii] = encParam->top_left[ii];\n                        picParam->bottom_right[ii] = encParam->bottom_right[ii];\n                    }\n                    break;\n                case 3:\n                case 4:\n                case 5:\n                    if (encParam->change_dir_flag == AVC_ON)\n                    {\n                        
picParam->slice_group_change_direction_flag = TRUE;\n                    }\n                    else\n                    {\n                        picParam->slice_group_change_direction_flag = FALSE;\n                    }\n                    if (/*encParam->change_rate_minus1 < 0 || (no need it's unsigned) */\n                        encParam->change_rate_minus1 > video->PicSizeInMapUnits - 1)\n                    {\n                        return AVCENC_INVALID_CHANGE_RATE;\n                    }\n                    picParam->slice_group_change_rate_minus1 = encParam->change_rate_minus1;\n                    video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;\n                    break;\n                case 6:\n                    picParam->pic_size_in_map_units_minus1 = video->PicSizeInMapUnits - 1;\n\n                    /* allocate picParam->slice_group_id */\n                    picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);\n                    if (picParam->slice_group_id == NULL)\n                    {\n                        return AVCENC_MEMORY_FAIL;\n                    }\n\n                    if (encParam->slice_group == NULL)\n                    {\n                        return AVCENC_ENCPARAM_MEM_FAIL;\n                    }\n                    for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)\n                    {\n                        picParam->slice_group_id[ii] = encParam->slice_group[ii];\n                    }\n                    break;\n                default:\n                    return AVCENC_INVALID_FMO_TYPE;\n            }\n        }\n        picParam->num_ref_idx_l0_active_minus1 = encParam->num_ref_frame - 1; /* assume frame only */\n        picParam->num_ref_idx_l1_active_minus1 = 0; /* default value */\n        picParam->weighted_pred_flag = 0; /* no weighted prediction supported */\n        
picParam->weighted_bipred_idc = 0; /* range 0,1,2 */\n        if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */\n            picParam->weighted_bipred_idc > 2)\n        {\n            return AVCENC_WEIGHTED_BIPRED_FAIL;\n        }\n        picParam->pic_init_qp_minus26 = encParam->initQP - 26;\n        if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)\n        {\n            return AVCENC_INIT_QP_FAIL; /* out of range */\n        }\n        picParam->pic_init_qs_minus26 = 0;\n        if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)\n        {\n            return AVCENC_INIT_QS_FAIL; /* out of range */\n        }\n\n        picParam->chroma_qp_index_offset = 0; /* default to zero for now */\n        if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)\n        {\n            return AVCENC_CHROMA_QP_FAIL; /* out of range */\n        }\n        /* deblocking */\n        picParam->deblocking_filter_control_present_flag = (encParam->db_filter == AVC_ON) ? TRUE : FALSE ;\n        /* constrained intra prediction */\n        picParam->constrained_intra_pred_flag = (encParam->constrained_intra_pred == AVC_ON) ? 
TRUE : FALSE;\n        picParam->redundant_pic_cnt_present_flag = 0; /* default */\n    }\n    else if (extP)// external PPS\n    {\n        picParam->pic_parameter_set_id = extP->pic_parameter_set_id - 1; /* to be increased by one */\n        picParam->seq_parameter_set_id = extP->seq_parameter_set_id;\n        picParam->entropy_coding_mode_flag = extP->entropy_coding_mode_flag;\n        if (extP->entropy_coding_mode_flag != 0) /* default to CAVLC */\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n        picParam->pic_order_present_flag = extP->pic_order_present_flag; /* default for now, will need it for B-slice */\n        if (extP->pic_order_present_flag != 0)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n        /* FMO */\n        if (/*(extP->num_slice_groups_minus1<0) || (no need it's unsigned) */\n            (extP->num_slice_groups_minus1 > MAX_NUM_SLICE_GROUP - 1))\n        {\n            return AVCENC_INVALID_NUM_SLICEGROUP;\n        }\n        picParam->num_slice_groups_minus1 = extP->num_slice_groups_minus1;\n\n        if (picParam->num_slice_groups_minus1 > 0)\n        {\n            picParam->slice_group_map_type = extP->slice_group_map_type;\n            switch (extP->slice_group_map_type)\n            {\n                case 0:\n                    for (ii = 0; ii <= (int)extP->num_slice_groups_minus1; ii++)\n                    {\n                        picParam->run_length_minus1[ii] = extP->run_length_minus1[ii];\n                    }\n                    break;\n                case 2:\n                    for (ii = 0; ii < (int)picParam->num_slice_groups_minus1; ii++)\n                    {\n                        picParam->top_left[ii] = extP->top_left[ii];\n                        picParam->bottom_right[ii] = extP->bottom_right[ii];\n                    }\n                    break;\n                case 3:\n                case 4:\n                case 5:\n                    
picParam->slice_group_change_direction_flag = extP->slice_group_change_direction_flag;\n                    if (/*extP->slice_group_change_rate_minus1 < 0 || (no need, it's unsigned) */\n                        extP->slice_group_change_rate_minus1 > video->PicSizeInMapUnits - 1)\n                    {\n                        return AVCENC_INVALID_CHANGE_RATE;\n                    }\n                    picParam->slice_group_change_rate_minus1 = extP->slice_group_change_rate_minus1;\n                    video->SliceGroupChangeRate = picParam->slice_group_change_rate_minus1 + 1;\n                    break;\n                case 6:\n                    if (extP->pic_size_in_map_units_minus1 != video->PicSizeInMapUnits - 1)\n                    {\n                        return AVCENC_NOT_SUPPORTED;\n                    }\n\n                    picParam->pic_size_in_map_units_minus1 = extP->pic_size_in_map_units_minus1;\n\n                    /* allocate picParam->slice_group_id */\n                    picParam->slice_group_id = (uint*)avcHandle->CBAVC_Malloc(userData, sizeof(uint) * video->PicSizeInMapUnits, DEFAULT_ATTR);\n                    if (picParam->slice_group_id == NULL)\n                    {\n                        return AVCENC_MEMORY_FAIL;\n                    }\n\n                    if (extP->slice_group_id == NULL)\n                    {\n                        return AVCENC_ENCPARAM_MEM_FAIL;\n                    }\n                    for (ii = 0; ii < (int)video->PicSizeInMapUnits; ii++)\n                    {\n                        picParam->slice_group_id[ii] = extP->slice_group_id[ii];\n                    }\n                    break;\n                default:\n                    return AVCENC_INVALID_FMO_TYPE;\n            }\n        }\n        picParam->num_ref_idx_l0_active_minus1 = extP->num_ref_idx_l0_active_minus1;\n        picParam->num_ref_idx_l1_active_minus1 = extP->num_ref_idx_l1_active_minus1; /* default value */\n        if 
(picParam->num_ref_idx_l1_active_minus1 != 0)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n\n        if (extP->weighted_pred_flag)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n\n        picParam->weighted_pred_flag = 0; /* no weighted prediction supported */\n        picParam->weighted_bipred_idc = extP->weighted_bipred_idc; /* range 0,1,2 */\n        if (/*picParam->weighted_bipred_idc < 0 || (no need, it's unsigned) */\n            picParam->weighted_bipred_idc > 2)\n        {\n            return AVCENC_WEIGHTED_BIPRED_FAIL;\n        }\n        picParam->pic_init_qp_minus26 = extP->pic_init_qp_minus26; /* default, will be changed at slice level anyway */\n        if (picParam->pic_init_qp_minus26 < -26 || picParam->pic_init_qp_minus26 > 25)\n        {\n            return AVCENC_INIT_QP_FAIL; /* out of range */\n        }\n        picParam->pic_init_qs_minus26 = extP->pic_init_qs_minus26;\n        if (picParam->pic_init_qs_minus26 < -26 || picParam->pic_init_qs_minus26 > 25)\n        {\n            return AVCENC_INIT_QS_FAIL; /* out of range */\n        }\n\n        picParam->chroma_qp_index_offset = extP->chroma_qp_index_offset; /* default to zero for now */\n        if (picParam->chroma_qp_index_offset < -12 || picParam->chroma_qp_index_offset > 12)\n        {\n            return AVCENC_CHROMA_QP_FAIL; /* out of range */\n        }\n        /* deblocking */\n        picParam->deblocking_filter_control_present_flag = extP->deblocking_filter_control_present_flag;\n        /* constrained intra prediction */\n        picParam->constrained_intra_pred_flag = extP->constrained_intra_pred_flag;\n        if (extP->redundant_pic_cnt_present_flag  != 0)\n        {\n            return AVCENC_NOT_SUPPORTED;\n        }\n        picParam->redundant_pic_cnt_present_flag = extP->redundant_pic_cnt_present_flag; /* default */\n    }\n    else\n    {\n        return AVCENC_NOT_SUPPORTED;\n    }\n\n    /****************** now set up some 
SliceHeader parameters ***********/\n    if (picParam->deblocking_filter_control_present_flag == TRUE)\n    {\n        /* these values only present when db_filter is ON */\n        if (encParam->disable_db_idc > 2)\n        {\n            return AVCENC_INVALID_DEBLOCK_IDC; /* out of range */\n        }\n        sliceHdr->disable_deblocking_filter_idc = encParam->disable_db_idc;\n\n        if (encParam->alpha_offset < -6 || encParam->alpha_offset > 6)\n        {\n            return AVCENC_INVALID_ALPHA_OFFSET;\n        }\n        sliceHdr->slice_alpha_c0_offset_div2 = encParam->alpha_offset;\n\n        if (encParam->beta_offset < -6 || encParam->beta_offset > 6)\n        {\n            return AVCENC_INVALID_BETA_OFFSET;\n        }\n        sliceHdr->slice_beta_offset_div_2 =  encParam->beta_offset;\n    }\n    if (encvid->outOfBandParamSet == TRUE)\n    {\n        sliceHdr->idr_pic_id = 0;\n    }\n    else\n    {\n        sliceHdr->idr_pic_id = (uint)(-1); /* start with zero */\n    }\n    sliceHdr->field_pic_flag = FALSE;\n    sliceHdr->bottom_field_flag = FALSE;  /* won't be used anyway */\n    video->MbaffFrameFlag = (seqParam->mb_adaptive_frame_field_flag && !sliceHdr->field_pic_flag);\n\n    /* the rest will be set in InitSlice() */\n\n    /* now the rate control and performance related parameters */\n    rateCtrl->scdEnable = (encParam->auto_scd == AVC_ON) ? TRUE : FALSE;\n    rateCtrl->idrPeriod = encParam->idr_period;// + 1;\n    rateCtrl->intraMBRate = encParam->intramb_refresh;\n    rateCtrl->dpEnable = (encParam->data_par == AVC_ON) ? TRUE : FALSE;\n\n    rateCtrl->subPelEnable = (encParam->sub_pel == AVC_ON) ? TRUE : FALSE;\n    rateCtrl->mvRange = encParam->search_range;\n\n    rateCtrl->subMBEnable = (encParam->submb_pred == AVC_ON) ? TRUE : FALSE;\n    rateCtrl->rdOptEnable = (encParam->rdopt_mode == AVC_ON) ? TRUE : FALSE;\n    rateCtrl->bidirPred = (encParam->bidir_pred == AVC_ON) ? 
TRUE : FALSE;\n\n    rateCtrl->rcEnable = (encParam->rate_control == AVC_ON) ? TRUE : FALSE;\n    rateCtrl->initQP = encParam->initQP;\n    rateCtrl->initQP = AVC_CLIP3(0, 51, rateCtrl->initQP);\n\n    rateCtrl->bitRate = encParam->bitrate;\n    rateCtrl->cpbSize = encParam->CPB_size;\n    rateCtrl->initDelayOffset = (rateCtrl->bitRate * encParam->init_CBP_removal_delay / 1000);\n\n    if (encParam->frame_rate == 0)\n    {\n        return AVCENC_INVALID_FRAMERATE;\n    }\n\n    rateCtrl->frame_rate = (OsclFloat)(encParam->frame_rate * 1.0 / 1000);\n//  rateCtrl->srcInterval = encParam->src_interval;\n    rateCtrl->first_frame = 1; /* set this flag for the first time */\n\n    /* contrained_setx_flag will be set inside the VerifyProfile called below.*/\n    if (!extS && !extP)\n    {\n        seqParam->profile_idc = encParam->profile;\n        seqParam->constrained_set0_flag = FALSE;\n        seqParam->constrained_set1_flag = FALSE;\n        seqParam->constrained_set2_flag = FALSE;\n        seqParam->constrained_set3_flag = FALSE;\n        seqParam->level_idc = encParam->level;\n    }\n    else\n    {\n        seqParam->profile_idc = extS->profile_idc;\n        seqParam->constrained_set0_flag = extS->constrained_set0_flag;\n        seqParam->constrained_set1_flag = extS->constrained_set1_flag;\n        seqParam->constrained_set2_flag = extS->constrained_set2_flag;\n        seqParam->constrained_set3_flag = extS->constrained_set3_flag;\n        seqParam->level_idc = extS->level_idc;\n    }\n\n\n    status = VerifyProfile(encvid, seqParam, picParam);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    status = VerifyLevel(encvid, seqParam, picParam);\n    if (status != AVCENC_SUCCESS)\n    {\n        return status;\n    }\n\n    return AVCENC_SUCCESS;\n}\n\n/* verify the profile setting */\nAVCEnc_Status VerifyProfile(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)\n{\n    AVCRateControl *rateCtrl = 
encvid->rateCtrl;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n\n    if (seqParam->profile_idc == 0) /* find profile for this setting */\n    {\n        /* find the right profile for it */\n        if (seqParam->direct_8x8_inference_flag == TRUE &&\n                picParam->entropy_coding_mode_flag == FALSE &&\n                picParam->num_slice_groups_minus1 <= 7 /*&&\n            picParam->num_slice_groups_minus1>=0 (no need, it's unsigned) */)\n        {\n            seqParam->profile_idc = AVC_EXTENDED;\n            seqParam->constrained_set2_flag = TRUE;\n        }\n\n        if (rateCtrl->dpEnable == FALSE &&\n                picParam->num_slice_groups_minus1 == 0 &&\n                picParam->redundant_pic_cnt_present_flag == FALSE)\n        {\n            seqParam->profile_idc = AVC_MAIN;\n            seqParam->constrained_set1_flag = TRUE;\n        }\n\n        if (rateCtrl->bidirPred == FALSE &&\n                rateCtrl->dpEnable == FALSE &&\n                seqParam->frame_mbs_only_flag == TRUE &&\n                picParam->weighted_pred_flag == FALSE &&\n                picParam->weighted_bipred_idc == 0 &&\n                picParam->entropy_coding_mode_flag == FALSE &&\n                picParam->num_slice_groups_minus1 <= 7 /*&&\n            picParam->num_slice_groups_minus1>=0 (no need, it's unsigned)*/)\n        {\n            seqParam->profile_idc = AVC_BASELINE;\n            seqParam->constrained_set0_flag = TRUE;\n        }\n\n        if (seqParam->profile_idc == 0) /* still zero */\n        {\n            return AVCENC_PROFILE_NOT_SUPPORTED;\n        }\n    }\n\n    /* check the list of supported profile by this library */\n    switch (seqParam->profile_idc)\n    {\n        case AVC_BASELINE:\n            if (rateCtrl->bidirPred == TRUE ||\n                    rateCtrl->dpEnable == TRUE ||\n                    seqParam->frame_mbs_only_flag != TRUE ||\n                    picParam->weighted_pred_flag == TRUE ||\n                    
picParam->weighted_bipred_idc != 0 ||\n                    picParam->entropy_coding_mode_flag == TRUE ||\n                    picParam->num_slice_groups_minus1 > 7 /*||\n            picParam->num_slice_groups_minus1<0 (no need, it's unsigned) */)\n            {\n                status = AVCENC_TOOLS_NOT_SUPPORTED;\n            }\n            break;\n\n        case AVC_MAIN:\n        case AVC_EXTENDED:\n            status = AVCENC_PROFILE_NOT_SUPPORTED;\n    }\n\n    return status;\n}\n\n/* verify the level setting */\nAVCEnc_Status VerifyLevel(AVCEncObject *encvid, AVCSeqParamSet *seqParam, AVCPicParamSet *picParam)\n{\n    (void)(picParam);\n\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCCommonObj *video = encvid->common;\n    int mb_per_sec, ii;\n    int lev_idx;\n    int dpb_size;\n\n    mb_per_sec = (int)(video->PicSizeInMbs * rateCtrl->frame_rate + 0.5);\n    dpb_size = (seqParam->num_ref_frames * video->PicSizeInMbs * 3) >> 6;\n\n    if (seqParam->level_idc == 0) /* find level for this setting */\n    {\n        for (ii = 0; ii < MAX_LEVEL_IDX; ii++)\n        {\n            if (mb_per_sec <= MaxMBPS[ii] &&\n                    video->PicSizeInMbs <= (uint)MaxFS[ii] &&\n                    rateCtrl->bitRate <= (int32)MaxBR[ii]*1000 &&\n                    rateCtrl->cpbSize <= (int32)MaxCPB[ii]*1000 &&\n                    rateCtrl->mvRange <= MaxVmvR[ii] &&\n                    dpb_size <= MaxDPBX2[ii]*512)\n            {\n                seqParam->level_idc = mapIdx2Lev[ii];\n                break;\n            }\n        }\n        if (seqParam->level_idc == 0)\n        {\n            return AVCENC_LEVEL_NOT_SUPPORTED;\n        }\n    }\n\n    /* check if this level is supported by this library */\n    lev_idx = mapLev2Idx[seqParam->level_idc];\n    if (seqParam->level_idc == AVC_LEVEL1_B)\n    {\n        seqParam->constrained_set3_flag = 1;\n    }\n\n\n    if (lev_idx == 255) /* not defined */\n    {\n        return AVCENC_LEVEL_NOT_SUPPORTED;\n 
   }\n\n    /* check if the encoding setting complies with the level */\n    if (mb_per_sec > MaxMBPS[lev_idx] ||\n            video->PicSizeInMbs > (uint)MaxFS[lev_idx] ||\n            rateCtrl->bitRate > (int32)MaxBR[lev_idx]*1000 ||\n            rateCtrl->cpbSize > (int32)MaxCPB[lev_idx]*1000 ||\n            rateCtrl->mvRange > MaxVmvR[lev_idx])\n    {\n        return AVCENC_LEVEL_FAIL;\n    }\n\n    return AVCENC_SUCCESS;\n}\n\n/* initialize variables at the beginning of each frame */\n/* determine the picture type */\n/* encode POC */\n/* maybe we should do more stuff here. MotionEstimation+SCD and generate a new SPS and PPS */\nAVCEnc_Status InitFrame(AVCEncObject *encvid)\n{\n    AVCStatus ret;\n    AVCEnc_Status status;\n    AVCCommonObj *video = encvid->common;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n\n    /* look for the next frame in coding_order and look for available picture\n       in the DPB. Note, video->currFS->PicOrderCnt, currFS->FrameNum and currPic->PicNum\n       are set to wrong number in this function (right for decoder). */\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        // call init DPB in here.\n        ret = AVCConfigureSequence(encvid->avcHandle, video, TRUE);\n        if (ret != AVC_SUCCESS)\n        {\n            return AVCENC_FAIL;\n        }\n    }\n\n    /* flexible macroblock ordering (every frame)*/\n    /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */\n    /* It changes once per each PPS. 
*/\n    FMOInit(video);\n\n    ret = DPBInitBuffer(encvid->avcHandle, video); // get new buffer\n\n    if (ret != AVC_SUCCESS)\n    {\n        return (AVCEnc_Status)ret; // AVCENC_PICTURE_READY, FAIL\n    }\n\n    DPBInitPic(video, 0); /* 0 is dummy */\n\n    /************* determine picture type IDR or non-IDR ***********/\n    video->currPicType = AVC_FRAME;\n    video->slice_data_partitioning = FALSE;\n    encvid->currInput->is_reference = 1; /* default to all frames */\n    video->nal_ref_idc = 1;  /* need to set this for InitPOC */\n    video->currPic->isReference = TRUE;\n\n    /************* set frame_num ********************/\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        video->prevFrameNum = video->MaxFrameNum;\n        video->PrevRefFrameNum = 0;\n        sliceHdr->frame_num = 0;\n    }\n    /* otherwise, it's set to previous reference frame access unit's frame_num in decoding order,\n       see the end of PVAVCDecodeSlice()*/\n    /* There's also restriction on the frame_num, see page 59 of JVT-I1010.doc. 
*/\n    /* Basically, frame_num can't be repeated unless it's opposite fields or non reference fields */\n    else\n    {\n        sliceHdr->frame_num = (video->PrevRefFrameNum + 1) % video->MaxFrameNum;\n    }\n    video->CurrPicNum = sliceHdr->frame_num;  /* for field_pic_flag = 0 */\n    //video->CurrPicNum = 2*sliceHdr->frame_num + 1; /* for field_pic_flag = 1 */\n\n    /* assign pic_order_cnt, video->PicOrderCnt */\n    status = InitPOC(encvid);\n    if (status != AVCENC_SUCCESS)  /* incorrigable fail */\n    {\n        return status;\n    }\n\n    /* Initialize refListIdx for this picture */\n    RefListInit(video);\n\n    /************* motion estimation and scene analysis ************/\n    // , to move this to MB-based MV search for comparison\n    // use sub-optimal QP for mv search\n    AVCMotionEstimation(encvid);  /* AVCENC_SUCCESS or AVCENC_NEW_IDR */\n\n    /* after this point, the picture type will be fixed to either IDR or non-IDR */\n    video->currFS->PicOrderCnt = video->PicOrderCnt;\n    video->currFS->FrameNum = video->sliceHdr->frame_num;\n    video->currPic->PicNum = video->CurrPicNum;\n    video->mbNum = 0; /* start from zero MB */\n    encvid->currSliceGroup = 0; /* start from slice group #0 */\n    encvid->numIntraMB = 0; /* reset this counter */\n\n    if (video->nal_unit_type == AVC_NALTYPE_IDR)\n    {\n        RCInitGOP(encvid);\n\n        /* calculate picture QP */\n        RCInitFrameQP(encvid);\n\n        return AVCENC_NEW_IDR;\n    }\n\n    /* calculate picture QP */\n    RCInitFrameQP(encvid); /* get QP after MV search */\n\n    return AVCENC_SUCCESS;\n}\n\n/* initialize variables for this slice */\nAVCEnc_Status InitSlice(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCPicParamSet *currPPS = video->currPicParams;\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n    int slice_type = video->slice_type;\n\n    sliceHdr->first_mb_in_slice = 
video->mbNum;\n    if (video->mbNum) // not first slice of a frame\n    {\n        video->sliceHdr->slice_type = (AVCSliceType)slice_type;\n    }\n\n    /* sliceHdr->slice_type already set in InitFrame */\n\n    sliceHdr->pic_parameter_set_id = video->currPicParams->pic_parameter_set_id;\n\n    /* sliceHdr->frame_num already set in InitFrame */\n\n    if (!currSPS->frame_mbs_only_flag)  /* we shouldn't need this check */\n    {\n        sliceHdr->field_pic_flag = sliceHdr->bottom_field_flag = FALSE;\n        return AVCENC_TOOLS_NOT_SUPPORTED;\n    }\n\n    /* sliceHdr->idr_pic_id already set in PVAVCEncodeNAL\n\n     sliceHdr->pic_order_cnt_lsb already set in InitFrame..InitPOC\n     sliceHdr->delta_pic_order_cnt_bottom  already set in InitPOC\n\n    sliceHdr->delta_pic_order_cnt[0] already set in InitPOC\n    sliceHdr->delta_pic_order_cnt[1] already set in InitPOC\n    */\n\n    sliceHdr->redundant_pic_cnt = 0; /* default if(currPPS->redundant_pic_cnt_present_flag), range 0..127 */\n    sliceHdr->direct_spatial_mv_pred_flag = 0; // default if(slice_type == AVC_B_SLICE)\n\n    sliceHdr->num_ref_idx_active_override_flag = FALSE; /* default, if(slice_type== P,SP or B)*/\n    sliceHdr->num_ref_idx_l0_active_minus1 = 0; /* default, if (num_ref_idx_active_override_flag) */\n    sliceHdr->num_ref_idx_l1_active_minus1 = 0; /* default, if above and B_slice */\n    /* the above 2 values range from 0..15 for frame picture and 0..31 for field picture */\n\n    /* ref_pic_list_reordering(), currently we don't do anything */\n    sliceHdr->ref_pic_list_reordering_flag_l0 = FALSE; /* default */\n    sliceHdr->ref_pic_list_reordering_flag_l1 = FALSE; /* default */\n    /* if the above are TRUE, some other params must be set */\n\n    if ((currPPS->weighted_pred_flag && (slice_type == AVC_P_SLICE || slice_type == AVC_SP_SLICE)) ||\n            (currPPS->weighted_bipred_idc == 1 && slice_type == AVC_B_SLICE))\n    {\n        //      pred_weight_table(); // not supported !!\n        
return AVCENC_TOOLS_NOT_SUPPORTED;\n    }\n\n    /* dec_ref_pic_marking(), this will be done later*/\n    sliceHdr->no_output_of_prior_pics_flag = FALSE; /* default */\n    sliceHdr->long_term_reference_flag = FALSE; /* for IDR frame, do not make it long term */\n    sliceHdr->adaptive_ref_pic_marking_mode_flag = FALSE; /* default */\n    /* other params are not set here because they are not used */\n\n    sliceHdr->cabac_init_idc = 0; /* default, if entropy_coding_mode_flag && slice_type==I or SI, range 0..2  */\n    sliceHdr->slice_qp_delta = 0; /* default for now */\n    sliceHdr->sp_for_switch_flag = FALSE; /* default, if slice_type == SP */\n    sliceHdr->slice_qs_delta = 0; /* default, if slice_type == SP or SI */\n\n    /* derived variables from encParam */\n    /* deblocking filter */\n    video->FilterOffsetA = video->FilterOffsetB = 0;\n    if (currPPS->deblocking_filter_control_present_flag == TRUE)\n    {\n        video->FilterOffsetA = sliceHdr->slice_alpha_c0_offset_div2 << 1;\n        video->FilterOffsetB = sliceHdr->slice_beta_offset_div_2 << 1;\n    }\n\n    /* flexible macroblock ordering */\n    /* populate video->mapUnitToSliceGroupMap and video->MbToSliceGroupMap */\n    /* We already call it at the end of PVAVCEncInitialize(). It changes once per each PPS. 
*/\n    if (video->currPicParams->num_slice_groups_minus1 > 0 && video->currPicParams->slice_group_map_type >= 3\n            && video->currPicParams->slice_group_map_type <= 5)\n    {\n        sliceHdr->slice_group_change_cycle = SLICE_GROUP_CHANGE_CYCLE;  /* default, don't understand how to set it!!!*/\n\n        video->MapUnitsInSliceGroup0 =\n            AVC_MIN(sliceHdr->slice_group_change_cycle * video->SliceGroupChangeRate, video->PicSizeInMapUnits);\n\n        FMOInit(video);\n    }\n\n    /* calculate SliceQPy first  */\n    /* calculate QSy first */\n\n    sliceHdr->slice_qp_delta = video->QPy - 26 - currPPS->pic_init_qp_minus26;\n    //sliceHdr->slice_qs_delta = video->QSy - 26 - currPPS->pic_init_qs_minus26;\n\n    return AVCENC_SUCCESS;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/intra_est.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_mem.h\"\n#include \"avcenc_lib.h\"\n\n#define TH_I4  0  /* threshold biasing toward I16 mode instead of I4 mode */\n#define TH_Intra  0 /* threshold biasing toward INTER mode instead of intra mode */\n\n#define FIXED_INTRAPRED_MODE  AVC_I16\n#define FIXED_I16_MODE  AVC_I16_DC\n#define FIXED_I4_MODE   AVC_I4_Diagonal_Down_Left\n#define FIXED_INTRA_CHROMA_MODE AVC_IC_DC\n\n#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \\\n                 x = 0xFF & (~(x>>31));}\n\n\nbool IntraDecisionABE(AVCEncObject *encvid, int min_cost, uint8 *curL, int picPitch)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCFrameIO *currInput = encvid->currInput;\n    int orgPitch = currInput->pitch;\n    int x_pos = (video->mb_x) << 4;\n    int y_pos = (video->mb_y) << 4;\n    uint8 *orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;\n    int j;\n    uint8 *topL, *leftL, *orgY_2, *orgY_3;\n    int temp, SBE, offset;\n    OsclFloat ABE;\n    bool intra = true;\n\n    if (((x_pos >> 4) != (int)video->PicWidthInMbs - 1) &&\n            ((y_pos >> 4) != (int)video->PicHeightInMbs - 1) &&\n            video->intraAvailA &&\n            video->intraAvailB)\n    {\n        SBE = 0;\n        /* top 
neighbor */\n        topL = curL - picPitch;\n        /* left neighbor */\n        leftL = curL - 1;\n        orgY_2 = orgY - orgPitch;\n\n        for (j = 0; j < 16; j++)\n        {\n            temp = *topL++ - orgY[j];\n            SBE += ((temp >= 0) ? temp : -temp);\n            temp = *(leftL += picPitch) - *(orgY_2 += orgPitch);\n            SBE += ((temp >= 0) ? temp : -temp);\n        }\n\n        /* calculate chroma */\n        offset = (y_pos >> 2) * picPitch + (x_pos >> 1);\n        topL = video->currPic->Scb + offset;\n        orgY_2 = currInput->YCbCr[1] + offset + (y_pos >> 2) * (orgPitch - picPitch);\n\n        leftL = topL - 1;\n        topL -= (picPitch >> 1);\n        orgY_3 = orgY_2 - (orgPitch >> 1);\n        for (j = 0; j < 8; j++)\n        {\n            temp = *topL++ - orgY_2[j];\n            SBE += ((temp >= 0) ? temp : -temp);\n            temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));\n            SBE += ((temp >= 0) ? temp : -temp);\n        }\n\n        topL = video->currPic->Scr + offset;\n        orgY_2 = currInput->YCbCr[2] + offset + (y_pos >> 2) * (orgPitch - picPitch);\n\n        leftL = topL - 1;\n        topL -= (picPitch >> 1);\n        orgY_3 = orgY_2 - (orgPitch >> 1);\n        for (j = 0; j < 8; j++)\n        {\n            temp = *topL++ - orgY_2[j];\n            SBE += ((temp >= 0) ? temp : -temp);\n            temp = *(leftL += (picPitch >> 1)) - *(orgY_3 += (orgPitch >> 1));\n            SBE += ((temp >= 0) ? 
temp : -temp);\n        }\n\n        /* compare mincost/384 and SBE/64 */\n        ABE = SBE / 64.0;\n        if (ABE*0.8 >= min_cost / 384.0)\n        {\n            intra = false;\n        }\n    }\n\n    return intra;\n}\n\n/* perform searching for MB mode */\n/* assuming that this is done inside the encoding loop,\nno need to call InitNeighborAvailability */\n\nvoid MBIntraSearch(AVCEncObject *encvid, int mbnum, uint8 *curL, int picPitch)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCFrameIO *currInput = encvid->currInput;\n    AVCMacroblock *currMB = video->currMB;\n    int min_cost;\n    uint8 *orgY;\n    int x_pos = (video->mb_x) << 4;\n    int y_pos = (video->mb_y) << 4;\n    uint32 *saved_inter;\n    int j;\n    int orgPitch = currInput->pitch;\n    bool intra = true;\n\n    currMB->CBP = 0;\n\n    /* first do motion vector and variable block size search */\n    min_cost = encvid->min_cost[mbnum];\n\n    /* now perform intra prediction search */\n    /* need to add the check for encvid->intraSearch[video->mbNum] to skip intra\n       if it's not worth checking. */\n    if (video->slice_type == AVC_P_SLICE)\n    {\n        /* Decide whether intra search is necessary or not */\n        /* This one, we do it in the encoding loop so the neighboring pixel are the\n        actual reconstructed pixels. 
*/\n        intra = IntraDecisionABE(encvid, min_cost, curL, picPitch);\n    }\n\n    if (intra == true || video->slice_type == AVC_I_SLICE)\n    {\n        orgY = currInput->YCbCr[0] + y_pos * orgPitch + x_pos;\n\n        /* i16 mode search */\n        /* generate all the predictions */\n        intrapred_luma_16x16(encvid);\n\n        /* evaluate them one by one */\n        find_cost_16x16(encvid, orgY, &min_cost);\n\n        if (video->slice_type == AVC_P_SLICE)\n        {\n            /* save current inter prediction */\n            saved_inter = encvid->subpel_pred; /* reuse existing buffer */\n            j = 16;\n            curL -= 4;\n            picPitch -= 16;\n            while (j--)\n            {\n                *saved_inter++ = *((uint32*)(curL += 4));\n                *saved_inter++ = *((uint32*)(curL += 4));\n                *saved_inter++ = *((uint32*)(curL += 4));\n                *saved_inter++ = *((uint32*)(curL += 4));\n                curL += picPitch;\n            }\n\n        }\n\n        /* i4 mode search */\n        mb_intra4x4_search(encvid, &min_cost);\n\n        encvid->min_cost[mbnum] = min_cost; /* update min_cost */\n    }\n\n\n    if (currMB->mb_intra)\n    {\n        chroma_intra_search(encvid);\n\n        /* need to set this in order for the MBInterPrediction to work!! 
*/\n        oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);\n        currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =\n                                    currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = -1;\n    }\n    else if (video->slice_type == AVC_P_SLICE && intra == true)\n    {\n        /* restore current inter prediction */\n        saved_inter = encvid->subpel_pred; /* reuse existing buffer */\n        j = 16;\n        curL -= ((picPitch + 16) << 4);\n        while (j--)\n        {\n            *((uint32*)(curL += 4)) = *saved_inter++;\n            *((uint32*)(curL += 4)) = *saved_inter++;\n            *((uint32*)(curL += 4)) = *saved_inter++;\n            *((uint32*)(curL += 4)) = *saved_inter++;\n            curL += picPitch;\n        }\n    }\n\n    return ;\n}\n\n/* generate all the prediction values */\nvoid intrapred_luma_16x16(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCPictureData *currPic = video->currPic;\n\n    int x_pos = (video->mb_x) << 4;\n    int y_pos = (video->mb_y) << 4;\n    int pitch = currPic->pitch;\n\n    int offset = y_pos * pitch + x_pos;\n\n    uint8 *pred, *top, *left;\n    uint8 *curL = currPic->Sl + offset; /* point to reconstructed frame */\n    uint32 word1, word2, word3, word4;\n    uint32 sum = 0;\n\n    int a_16, b, c, factor_c;\n    uint8 *comp_ref_x0, *comp_ref_x1, *comp_ref_y0, *comp_ref_y1;\n    int H = 0, V = 0, tmp, value;\n    int i;\n\n    if (video->intraAvailB)\n    {\n        //get vertical prediction mode\n        top = curL - pitch;\n\n        pred = encvid->pred_i16[AVC_I16_Vertical] - 16;\n\n        word1 = *((uint32*)(top));  /* read 4 bytes from top */\n        word2 = *((uint32*)(top + 4)); /* read 4 bytes from top */\n        word3 = *((uint32*)(top + 8)); /* read 4 bytes from top */\n        word4 = *((uint32*)(top + 12)); /* read 4 bytes from top */\n\n        for (i = 0; i < 16; i++)\n        {\n            *((uint32*)(pred += 16)) = word1;\n            *((uint32*)(pred 
+ 4)) = word2;\n            *((uint32*)(pred + 8)) = word3;\n            *((uint32*)(pred + 12)) = word4;\n\n        }\n\n        sum = word1 & 0xFF00FF;\n        word1 = (word1 >> 8) & 0xFF00FF;\n        sum += word1;\n        word1 = (word2 & 0xFF00FF);\n        sum += word1;\n        word2 = (word2 >> 8) & 0xFF00FF;\n        sum += word2;\n        word1 = (word3 & 0xFF00FF);\n        sum += word1;\n        word3 = (word3 >> 8) & 0xFF00FF;\n        sum += word3;\n        word1 = (word4 & 0xFF00FF);\n        sum += word1;\n        word4 = (word4 >> 8) & 0xFF00FF;\n        sum += word4;\n\n        sum += (sum >> 16);\n        sum &= 0xFFFF;\n\n        if (!video->intraAvailA)\n        {\n            sum = (sum + 8) >> 4;\n        }\n    }\n\n    if (video->intraAvailA)\n    {\n        // get horizontal mode\n        left = curL - 1 - pitch;\n\n        pred = encvid->pred_i16[AVC_I16_Horizontal] - 16;\n\n        for (i = 0; i < 16; i++)\n        {\n            word1 = *(left += pitch);\n            sum += word1;\n\n            word1 = (word1 << 8) | word1;\n            word1 = (word1 << 16) | word1; /* make it 4 */\n\n            *(uint32*)(pred += 16) = word1;\n            *(uint32*)(pred + 4) = word1;\n            *(uint32*)(pred + 8) = word1;\n            *(uint32*)(pred + 12) = word1;\n        }\n\n        if (!video->intraAvailB)\n        {\n            sum = (sum + 8) >> 4;\n        }\n        else\n        {\n            sum = (sum + 16) >> 5;\n        }\n    }\n\n    // get DC mode\n    if (!video->intraAvailA && !video->intraAvailB)\n    {\n        sum = 0x80808080;\n    }\n    else\n    {\n        sum = (sum << 8) | sum;\n        sum = (sum << 16) | sum;\n    }\n\n    pred = encvid->pred_i16[AVC_I16_DC] - 16;\n    for (i = 0; i < 16; i++)\n    {\n        *((uint32*)(pred += 16)) = sum;\n        *((uint32*)(pred + 4)) = sum;\n        *((uint32*)(pred + 8)) = sum;\n        *((uint32*)(pred + 12)) = sum;\n    }\n\n    // get plane mode\n    if 
(video->intraAvailA && video->intraAvailB && video->intraAvailD)\n    {\n        pred = encvid->pred_i16[AVC_I16_Plane] - 16;\n\n        comp_ref_x0 = curL - pitch + 8;\n        comp_ref_x1 = curL - pitch + 6;\n        comp_ref_y0 = curL - 1 + (pitch << 3);\n        comp_ref_y1 = curL - 1 + 6 * pitch;\n\n        for (i = 1; i < 8; i++)\n        {\n            H += i * (*comp_ref_x0++ - *comp_ref_x1--);\n            V += i * (*comp_ref_y0 - *comp_ref_y1);\n            comp_ref_y0 += pitch;\n            comp_ref_y1 -= pitch;\n        }\n\n        H += i * (*comp_ref_x0++ - curL[-pitch-1]);\n        V += i * (*comp_ref_y0 - *comp_ref_y1);\n\n\n        a_16 = ((*(curL - pitch + 15) + *(curL - 1 + 15 * pitch)) << 4) + 16;;\n        b = (5 * H + 32) >> 6;\n        c = (5 * V + 32) >> 6;\n\n        tmp = 0;\n        for (i = 0; i < 16; i++)\n        {\n            factor_c = a_16 + c * (tmp++ - 7);\n            factor_c -= 7 * b;\n\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = value;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 8);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 16);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 24);\n            *((uint32*)(pred += 16)) = word1;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = value;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 8);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 16);\n            value = factor_c >> 5;\n            factor_c 
+= b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 24);\n            *((uint32*)(pred + 4)) = word1;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = value;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 8);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 16);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 24);\n            *((uint32*)(pred + 8)) = word1;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = value;\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 8);\n            value = factor_c >> 5;\n            factor_c += b;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 16);\n            value = factor_c >> 5;\n            CLIP_RESULT(value)\n            word1 = (word1) | (value << 24);\n            *((uint32*)(pred + 12)) = word1;\n        }\n    }\n\n    return ;\n}\n\n\n/* evaluate each prediction mode of I16 */\nvoid find_cost_16x16(AVCEncObject *encvid, uint8 *orgY, int *min_cost)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCMacroblock *currMB = video->currMB;\n    int cost;\n    int org_pitch = encvid->currInput->pitch;\n\n    /* evaluate vertical mode */\n    if (video->intraAvailB)\n    {\n        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Vertical], *min_cost);\n        if (cost < *min_cost)\n        {\n            *min_cost = cost;\n            currMB->mbMode = AVC_I16;\n            currMB->mb_intra = 1;\n            currMB->i16Mode = AVC_I16_Vertical;\n        }\n    }\n\n\n    /* evaluate 
horizontal mode */\n    if (video->intraAvailA)\n    {\n        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Horizontal], *min_cost);\n        if (cost < *min_cost)\n        {\n            *min_cost = cost;\n            currMB->mbMode = AVC_I16;\n            currMB->mb_intra = 1;\n            currMB->i16Mode = AVC_I16_Horizontal;\n        }\n    }\n\n    /* evaluate DC mode */\n    cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_DC], *min_cost);\n    if (cost < *min_cost)\n    {\n        *min_cost = cost;\n        currMB->mbMode = AVC_I16;\n        currMB->mb_intra = 1;\n        currMB->i16Mode = AVC_I16_DC;\n    }\n\n    /* evaluate plane mode */\n    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n    {\n        cost = cost_i16(orgY, org_pitch, encvid->pred_i16[AVC_I16_Plane], *min_cost);\n        if (cost < *min_cost)\n        {\n            *min_cost = cost;\n            currMB->mbMode = AVC_I16;\n            currMB->mb_intra = 1;\n            currMB->i16Mode = AVC_I16_Plane;\n        }\n    }\n\n    return ;\n}\n\n\nint cost_i16(uint8 *org, int org_pitch, uint8 *pred, int min_cost)\n{\n\n    int cost;\n    int j, k;\n    int16 res[256], *pres; // residue\n    int m0, m1, m2, m3;\n\n    // calculate SATD\n    org_pitch -= 16;\n    pres = res;\n    // horizontal transform\n    for (j = 0; j < 16; j++)\n    {\n        k = 4;\n        while (k > 0)\n        {\n            m0 = org[0] - pred[0];\n            m3 = org[3] - pred[3];\n            m0 += m3;\n            m3 = m0 - (m3 << 1);\n            m1 = org[1] - pred[1];\n            m2 = org[2] - pred[2];\n            m1 += m2;\n            m2 = m1 - (m2 << 1);\n            pres[0] = m0 + m1;\n            pres[2] = m0 - m1;\n            pres[1] = m2 + m3;\n            pres[3] = m3 - m2;\n\n            org += 4;\n            pres += 4;\n            pred += 4;\n            k--;\n        }\n        org += org_pitch;\n    }\n    /* vertical transform */\n    cost = 0;\n    
for (j = 0; j < 4; j++)\n    {\n        pres = res + (j << 6);\n        k = 16;\n        while (k > 0)\n        {\n            m0 = pres[0];\n            m3 = pres[3<<4];\n            m0 += m3;\n            m3 = m0 - (m3 << 1);\n            m1 = pres[1<<4];\n            m2 = pres[2<<4];\n            m1 += m2;\n            m2 = m1 - (m2 << 1);\n            pres[0] = m0 = m0 + m1;\n\n            if (k&0x3)  // only sum up non DC values.\n            {\n                cost += ((m0 > 0) ? m0 : -m0);\n            }\n\n            m1 = m0 - (m1 << 1);\n            cost += ((m1 > 0) ? m1 : -m1);\n            m3 = m2 + m3;\n            cost += ((m3 > 0) ? m3 : -m3);\n            m2 = m3 - (m2 << 1);\n            cost += ((m2 > 0) ? m2 : -m2);\n\n            pres++;\n            k--;\n        }\n        if ((cost >> 1) > min_cost) /* early drop out */\n        {\n            return (cost >> 1);\n        }\n    }\n\n    /* Hadamard of the DC coefficient */\n    pres = res;\n    k = 4;\n    while (k > 0)\n    {\n        m0 = pres[0];\n        m3 = pres[3<<2];\n        m0 >>= 2;\n        m0 += (m3 >> 2);\n        m3 = m0 - (m3 >> 1);\n        m1 = pres[1<<2];\n        m2 = pres[2<<2];\n        m1 >>= 2;\n        m1 += (m2 >> 2);\n        m2 = m1 - (m2 >> 1);\n        pres[0] = (m0 + m1);\n        pres[2<<2] = (m0 - m1);\n        pres[1<<2] = (m2 + m3);\n        pres[3<<2] = (m3 - m2);\n        pres += (4 << 4);\n        k--;\n    }\n\n    pres = res;\n    k = 4;\n    while (k > 0)\n    {\n        m0 = pres[0];\n        m3 = pres[3<<6];\n        m0 += m3;\n        m3 = m0 - (m3 << 1);\n        m1 = pres[1<<6];\n        m2 = pres[2<<6];\n        m1 += m2;\n        m2 = m1 - (m2 << 1);\n        m0 = m0 + m1;\n        cost += ((m0 >= 0) ? m0 : -m0);\n        m1 = m0 - (m1 << 1);\n        cost += ((m1 >= 0) ? m1 : -m1);\n        m3 = m2 + m3;\n        cost += ((m3 >= 0) ? m3 : -m3);\n        m2 = m3 - (m2 << 1);\n        cost += ((m2 >= 0) ? 
m2 : -m2);\n        pres += 4;\n\n        if ((cost >> 1) > min_cost) /* early drop out */\n        {\n            return (cost >> 1);\n        }\n\n        k--;\n    }\n\n    return (cost >> 1);\n}\n\n\nvoid mb_intra4x4_search(AVCEncObject *encvid, int *min_cost)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCMacroblock *currMB = video->currMB;\n    AVCPictureData *currPic = video->currPic;\n    AVCFrameIO *currInput = encvid->currInput;\n    int pitch = currPic->pitch;\n    int org_pitch = currInput->pitch;\n    int offset;\n    uint8 *curL, *comp, *org4, *org8;\n    int y = video->mb_y << 4;\n    int x = video->mb_x << 4;\n\n    int b8, b4, cost4x4, blkidx;\n    int cost = 0;\n    int numcoef;\n    int dummy = 0;\n    int mb_intra = currMB->mb_intra; // save the original value\n\n    offset = y * pitch + x;\n\n    curL = currPic->Sl + offset;\n    org8 = currInput->YCbCr[0] + y * org_pitch + x;\n    video->pred_pitch = 4;\n\n    cost = (int)(6.0 * encvid->lambda_mode + 0.4999);\n    cost <<= 2;\n\n    currMB->mb_intra = 1;  // temporary set this to one to enable the IDCT\n    // operation inside dct_luma\n\n    for (b8 = 0; b8 < 4; b8++)\n    {\n        comp = curL;\n        org4 = org8;\n\n        for (b4 = 0; b4 < 4; b4++)\n        {\n            blkidx = blkIdx2blkXY[b8][b4];\n            cost4x4 = blk_intra4x4_search(encvid, blkidx, comp, org4);\n            cost += cost4x4;\n            if (cost > *min_cost)\n            {\n                currMB->mb_intra = mb_intra; // restore the value\n                return ;\n            }\n\n            /* do residue, Xfrm, Q, invQ, invXfrm, recon and save the DCT coefs.*/\n            video->pred_block = encvid->pred_i4[currMB->i4Mode[blkidx]];\n            numcoef = dct_luma(encvid, blkidx, comp, org4, &dummy);\n            currMB->nz_coeff[blkidx] = numcoef;\n            if (numcoef)\n            {\n                video->cbp4x4 |= (1 << blkidx);\n                currMB->CBP |= (1 << b8);\n            }\n\n 
           if (b4&1)\n            {\n                comp += ((pitch << 2) - 4);\n                org4 += ((org_pitch << 2) - 4);\n            }\n            else\n            {\n                comp += 4;\n                org4 += 4;\n            }\n        }\n\n        if (b8&1)\n        {\n            curL += ((pitch << 3) - 8);\n            org8 += ((org_pitch << 3) - 8);\n        }\n        else\n        {\n            curL += 8;\n            org8 += 8;\n        }\n    }\n\n    currMB->mb_intra = mb_intra; // restore the value\n\n    if (cost < *min_cost)\n    {\n        *min_cost = cost;\n        currMB->mbMode = AVC_I4;\n        currMB->mb_intra = 1;\n    }\n\n    return ;\n}\n\n\n/* search for i4 mode for a 4x4 block */\nint blk_intra4x4_search(AVCEncObject *encvid, int blkidx, uint8 *cur, uint8 *org)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCNeighborAvailability availability;\n    AVCMacroblock *currMB = video->currMB;\n    bool top_left = FALSE;\n    int pitch = video->currPic->pitch;\n    uint8 mode_avail[AVCNumI4PredMode];\n    uint32 temp, DC;\n    uint8 *pred;\n    int org_pitch = encvid->currInput->pitch;\n    uint16 min_cost, cost;\n\n    int P_x, Q_x, R_x, P_y, Q_y, R_y, D, D0, D1;\n    int P0, Q0, R0, S0, P1, Q1, R1, P2, Q2;\n    uint8 P_A, P_B, P_C, P_D, P_E, P_F, P_G, P_H, P_I, P_J, P_K, P_L, P_X;\n    int r0, r1, r2, r3, r4, r5, r6, r7;\n    int x0, x1, x2, x3, x4, x5;\n    uint32 temp1, temp2;\n\n    int ipmode, mostProbableMode;\n    int fixedcost = 4 * encvid->lambda_mode;\n    int min_sad = 0x7FFF;\n\n    availability.left = TRUE;\n    availability.top = TRUE;\n    if (blkidx <= 3) /* top row block  (!block_y) */\n    { /* check availability up */\n        availability.top = video->intraAvailB ;\n    }\n    if (!(blkidx&0x3)) /* left column block (!block_x)*/\n    { /* check availability left */\n        availability.left = video->intraAvailA ;\n    }\n    availability.top_right = BlkTopRight[blkidx];\n\n    if 
(availability.top_right == 2)\n    {\n        availability.top_right = video->intraAvailB;\n    }\n    else if (availability.top_right == 3)\n    {\n        availability.top_right = video->intraAvailC;\n    }\n\n    if (availability.top == TRUE)\n    {\n        temp = *(uint32*)(cur - pitch);\n        P_A = temp & 0xFF;\n        P_B = (temp >> 8) & 0xFF;\n        P_C = (temp >> 16) & 0xFF;\n        P_D = (temp >> 24) & 0xFF;\n    }\n    else\n    {\n        P_A = P_B = P_C = P_D = 128;\n    }\n\n    if (availability.top_right == TRUE)\n    {\n        temp = *(uint32*)(cur - pitch + 4);\n        P_E = temp & 0xFF;\n        P_F = (temp >> 8) & 0xFF;\n        P_G = (temp >> 16) & 0xFF;\n        P_H = (temp >> 24) & 0xFF;\n    }\n    else\n    {\n        P_E = P_F = P_G = P_H = 128;\n    }\n\n    if (availability.left == TRUE)\n    {\n        cur--;\n        P_I = *cur;\n        P_J = *(cur += pitch);\n        P_K = *(cur += pitch);\n        P_L = *(cur + pitch);\n        cur -= (pitch << 1);\n        cur++;\n    }\n    else\n    {\n        P_I = P_J = P_K = P_L = 128;\n    }\n\n    /* check if top-left pixel is available */\n    if (((blkidx > 3) && (blkidx&0x3)) || ((blkidx > 3) && video->intraAvailA)\n            || ((blkidx&0x3) && video->intraAvailB)\n            || (video->intraAvailA && video->intraAvailD && video->intraAvailB))\n    {\n        top_left = TRUE;\n        P_X = *(cur - pitch - 1);\n    }\n    else\n    {\n        P_X = 128;\n    }\n\n    //===== INTRA PREDICTION FOR 4x4 BLOCK =====\n    /* vertical */\n    mode_avail[AVC_I4_Vertical] = 0;\n    if (availability.top)\n    {\n        mode_avail[AVC_I4_Vertical] = 1;\n        pred = encvid->pred_i4[AVC_I4_Vertical];\n\n        temp = (P_D << 24) | (P_C << 16) | (P_B << 8) | P_A ;\n        *((uint32*)pred) =  temp; /* write 4 at a time */\n        *((uint32*)(pred += 4)) =  temp;\n        *((uint32*)(pred += 4)) =  temp;\n        *((uint32*)(pred += 4)) =  temp;\n    }\n    /* horizontal */\n    
mode_avail[AVC_I4_Horizontal] = 0;\n    mode_avail[AVC_I4_Horizontal_Up] = 0;\n    if (availability.left)\n    {\n        mode_avail[AVC_I4_Horizontal] = 1;\n        pred = encvid->pred_i4[AVC_I4_Horizontal];\n\n        temp = P_I | (P_I << 8);\n        temp = temp | (temp << 16);\n        *((uint32*)pred) = temp;\n        temp = P_J | (P_J << 8);\n        temp = temp | (temp << 16);\n        *((uint32*)(pred += 4)) = temp;\n        temp = P_K | (P_K << 8);\n        temp = temp | (temp << 16);\n        *((uint32*)(pred += 4)) = temp;\n        temp = P_L | (P_L << 8);\n        temp = temp | (temp << 16);\n        *((uint32*)(pred += 4)) = temp;\n\n        mode_avail[AVC_I4_Horizontal_Up] = 1;\n        pred = encvid->pred_i4[AVC_I4_Horizontal_Up];\n\n        Q0 = (P_J + P_K + 1) >> 1;\n        Q1 = (P_J + (P_K << 1) + P_L + 2) >> 2;\n        P0 = ((P_I + P_J + 1) >> 1);\n        P1 = ((P_I + (P_J << 1) + P_K + 2) >> 2);\n\n        temp = P0 | (P1 << 8);      // [P0 P1 Q0 Q1]\n        temp |= (Q0 << 16);     // [Q0 Q1 R0 DO]\n        temp |= (Q1 << 24);     // [R0 D0 D1 D1]\n        *((uint32*)pred) = temp;      // [D1 D1 D1 D1]\n\n        D0 = (P_K + 3 * P_L + 2) >> 2;\n        R0 = (P_K + P_L + 1) >> 1;\n\n        temp = Q0 | (Q1 << 8);\n        temp |= (R0 << 16);\n        temp |= (D0 << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        D1 = P_L;\n\n        temp = R0 | (D0 << 8);\n        temp |= (D1 << 16);\n        temp |= (D1 << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        temp = D1 | (D1 << 8);\n        temp |= (temp << 16);\n        *((uint32*)(pred += 4)) = temp;\n    }\n    /* DC */\n    mode_avail[AVC_I4_DC] = 1;\n    pred = encvid->pred_i4[AVC_I4_DC];\n    if (availability.left)\n    {\n        DC = P_I + P_J + P_K + P_L;\n\n        if (availability.top)\n        {\n            DC = (P_A + P_B + P_C + P_D + DC + 4) >> 3;\n        }\n        else\n        {\n            DC = (DC + 2) >> 2;\n\n        }\n    }\n    else if 
(availability.top)\n    {\n        DC = (P_A + P_B + P_C + P_D + 2) >> 2;\n\n    }\n    else\n    {\n        DC = 128;\n    }\n\n    temp = DC | (DC << 8);\n    temp = temp | (temp << 16);\n    *((uint32*)pred) = temp;\n    *((uint32*)(pred += 4)) = temp;\n    *((uint32*)(pred += 4)) = temp;\n    *((uint32*)(pred += 4)) = temp;\n\n    /* Down-left */\n    mode_avail[AVC_I4_Diagonal_Down_Left] = 0;\n\n    if (availability.top)\n    {\n        mode_avail[AVC_I4_Diagonal_Down_Left] = 1;\n\n        pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Left];\n\n        r0 = P_A;\n        r1 = P_B;\n        r2 = P_C;\n        r3 = P_D;\n\n        r0 += (r1 << 1);\n        r0 += r2;\n        r0 += 2;\n        r0 >>= 2;\n        r1 += (r2 << 1);\n        r1 += r3;\n        r1 += 2;\n        r1 >>= 2;\n\n        if (availability.top_right)\n        {\n            r4 = P_E;\n            r5 = P_F;\n            r6 = P_G;\n            r7 = P_H;\n\n            r2 += (r3 << 1);\n            r2 += r4;\n            r2 += 2;\n            r2 >>= 2;\n            r3 += (r4 << 1);\n            r3 += r5;\n            r3 += 2;\n            r3 >>= 2;\n            r4 += (r5 << 1);\n            r4 += r6;\n            r4 += 2;\n            r4 >>= 2;\n            r5 += (r6 << 1);\n            r5 += r7;\n            r5 += 2;\n            r5 >>= 2;\n            r6 += (3 * r7);\n            r6 += 2;\n            r6 >>= 2;\n            temp = r0 | (r1 << 8);\n            temp |= (r2 << 16);\n            temp |= (r3 << 24);\n            *((uint32*)pred) = temp;\n\n            temp = (temp >> 8) | (r4 << 24);\n            *((uint32*)(pred += 4)) = temp;\n\n            temp = (temp >> 8) | (r5 << 24);\n            *((uint32*)(pred += 4)) = temp;\n\n            temp = (temp >> 8) | (r6 << 24);\n            *((uint32*)(pred += 4)) = temp;\n        }\n        else\n        {\n            r2 += (r3 * 3);\n            r2 += 2;\n            r2 >>= 2;\n            r3 = ((r3 << 2) + 2);\n            r3 >>= 2;\n\n     
       temp = r0 | (r1 << 8);\n            temp |= (r2 << 16);\n            temp |= (r3 << 24);\n            *((uint32*)pred) = temp;\n\n            temp = (temp >> 8) | (r3 << 24);\n            *((uint32*)(pred += 4)) = temp;\n\n            temp = (temp >> 8) | (r3 << 24);\n            *((uint32*)(pred += 4)) = temp;\n\n            temp = (temp >> 8) | (r3 << 24);\n            *((uint32*)(pred += 4)) = temp;\n\n        }\n    }\n\n    /* Down Right */\n    mode_avail[AVC_I4_Diagonal_Down_Right] = 0;\n    /* Diagonal Vertical Right */\n    mode_avail[AVC_I4_Vertical_Right] = 0;\n    /* Horizontal Down */\n    mode_avail[AVC_I4_Horizontal_Down] = 0;\n\n    if (top_left == TRUE)\n    {\n        /* Down Right */\n        mode_avail[AVC_I4_Diagonal_Down_Right] = 1;\n        pred = encvid->pred_i4[AVC_I4_Diagonal_Down_Right];\n\n        Q_x = (P_A + 2 * P_B + P_C + 2) >> 2;\n        R_x = (P_B + 2 * P_C + P_D + 2) >> 2;\n        P_x = (P_X + 2 * P_A + P_B + 2) >> 2;\n        D   = (P_A + 2 * P_X + P_I + 2) >> 2;\n        P_y = (P_X + 2 * P_I + P_J + 2) >> 2;\n        Q_y = (P_I + 2 * P_J + P_K + 2) >> 2;\n        R_y = (P_J + 2 * P_K + P_L + 2) >> 2;\n\n        /* we can pack these */\n        temp =  D | (P_x << 8);   //[D   P_x Q_x R_x]\n        //[P_y D   P_x Q_x]\n        temp |= (Q_x << 16); //[Q_y P_y D   P_x]\n        temp |= (R_x << 24);  //[R_y Q_y P_y D  ]\n        *((uint32*)pred) = temp;\n\n        temp =  P_y | (D << 8);\n        temp |= (P_x << 16);\n        temp |= (Q_x << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        temp =  Q_y | (P_y << 8);\n        temp |= (D << 16);\n        temp |= (P_x << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        temp = R_y | (Q_y << 8);\n        temp |= (P_y << 16);\n        temp |= (D << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n\n        /* Diagonal Vertical Right */\n        mode_avail[AVC_I4_Vertical_Right] = 1;\n        pred = encvid->pred_i4[AVC_I4_Vertical_Right];\n\n        Q0 = P_A + P_B + 
1;\n        R0 = P_B + P_C + 1;\n        S0 = P_C + P_D + 1;\n        P0 = P_X + P_A + 1;\n        D = (P_I + 2 * P_X + P_A + 2) >> 2;\n\n        P1 = (P0 + Q0) >> 2;\n        Q1 = (Q0 + R0) >> 2;\n        R1 = (R0 + S0) >> 2;\n\n        P0 >>= 1;\n        Q0 >>= 1;\n        R0 >>= 1;\n        S0 >>= 1;\n\n        P2 = (P_X + 2 * P_I + P_J + 2) >> 2;\n        Q2 = (P_I + 2 * P_J + P_K + 2) >> 2;\n\n        temp =  P0 | (Q0 << 8);  //[P0 Q0 R0 S0]\n        //[D  P1 Q1 R1]\n        temp |= (R0 << 16); //[P2 P0 Q0 R0]\n        temp |= (S0 << 24); //[Q2 D  P1 Q1]\n        *((uint32*)pred) =  temp;\n\n        temp =  D | (P1 << 8);\n        temp |= (Q1 << 16);\n        temp |= (R1 << 24);\n        *((uint32*)(pred += 4)) =  temp;\n\n        temp = P2 | (P0 << 8);\n        temp |= (Q0 << 16);\n        temp |= (R0 << 24);\n        *((uint32*)(pred += 4)) =  temp;\n\n        temp = Q2 | (D << 8);\n        temp |= (P1 << 16);\n        temp |= (Q1 << 24);\n        *((uint32*)(pred += 4)) =  temp;\n\n\n        /* Horizontal Down */\n        mode_avail[AVC_I4_Horizontal_Down] = 1;\n        pred = encvid->pred_i4[AVC_I4_Horizontal_Down];\n\n\n        Q2 = (P_A + 2 * P_B + P_C + 2) >> 2;\n        P2 = (P_X + 2 * P_A + P_B + 2) >> 2;\n        D = (P_I + 2 * P_X + P_A + 2) >> 2;\n        P0 = P_X + P_I + 1;\n        Q0 = P_I + P_J + 1;\n        R0 = P_J + P_K + 1;\n        S0 = P_K + P_L + 1;\n\n        P1 = (P0 + Q0) >> 2;\n        Q1 = (Q0 + R0) >> 2;\n        R1 = (R0 + S0) >> 2;\n\n        P0 >>= 1;\n        Q0 >>= 1;\n        R0 >>= 1;\n        S0 >>= 1;\n\n\n        /* we can pack these */\n        temp = P0 | (D << 8);   //[P0 D  P2 Q2]\n        //[Q0 P1 P0 D ]\n        temp |= (P2 << 16);  //[R0 Q1 Q0 P1]\n        temp |= (Q2 << 24); //[S0 R1 R0 Q1]\n        *((uint32*)pred) = temp;\n\n        temp = Q0 | (P1 << 8);\n        temp |= (P0 << 16);\n        temp |= (D << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        temp = R0 | (Q1 << 8);\n        temp |= (Q0 << 
16);\n        temp |= (P1 << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n        temp = S0 | (R1 << 8);\n        temp |= (R0 << 16);\n        temp |= (Q1 << 24);\n        *((uint32*)(pred += 4)) = temp;\n\n    }\n\n    /* vertical left */\n    mode_avail[AVC_I4_Vertical_Left] = 0;\n    if (availability.top)\n    {\n        mode_avail[AVC_I4_Vertical_Left] = 1;\n        pred = encvid->pred_i4[AVC_I4_Vertical_Left];\n\n        x0 = P_A + P_B + 1;\n        x1 = P_B + P_C + 1;\n        x2 = P_C + P_D + 1;\n        if (availability.top_right)\n        {\n            x3 = P_D + P_E + 1;\n            x4 = P_E + P_F + 1;\n            x5 = P_F + P_G + 1;\n        }\n        else\n        {\n            x3 = x4 = x5 = (P_D << 1) + 1;\n        }\n\n        temp1 = (x0 >> 1);\n        temp1 |= ((x1 >> 1) << 8);\n        temp1 |= ((x2 >> 1) << 16);\n        temp1 |= ((x3 >> 1) << 24);\n\n        *((uint32*)pred) = temp1;\n\n        temp2 = ((x0 + x1) >> 2);\n        temp2 |= (((x1 + x2) >> 2) << 8);\n        temp2 |= (((x2 + x3) >> 2) << 16);\n        temp2 |= (((x3 + x4) >> 2) << 24);\n\n        *((uint32*)(pred += 4)) = temp2;\n\n        temp1 = (temp1 >> 8) | ((x4 >> 1) << 24);   /* rotate out old value */\n        *((uint32*)(pred += 4)) = temp1;\n\n        temp2 = (temp2 >> 8) | (((x4 + x5) >> 2) << 24); /* rotate out old value */\n        *((uint32*)(pred += 4)) = temp2;\n    }\n\n    //===== LOOP OVER ALL 4x4 INTRA PREDICTION MODES =====\n    // can re-order the search here instead of going in order\n\n    // find most probable mode\n    encvid->mostProbableI4Mode[blkidx] = mostProbableMode = FindMostProbableI4Mode(video, blkidx);\n\n    min_cost = 0xFFFF;\n\n    for (ipmode = 0; ipmode < AVCNumI4PredMode; ipmode++)\n    {\n        if (mode_avail[ipmode] == TRUE)\n        {\n            cost  = (ipmode == mostProbableMode) ? 
0 : fixedcost;\n            pred = encvid->pred_i4[ipmode];\n\n            cost_i4(org, org_pitch, pred, &cost);\n\n            if (cost < min_cost)\n            {\n                currMB->i4Mode[blkidx] = (AVCIntra4x4PredMode)ipmode;\n                min_cost   = cost;\n                min_sad = cost - ((ipmode == mostProbableMode) ? 0 : fixedcost);\n            }\n        }\n    }\n\n    if (blkidx == 0)\n    {\n        encvid->i4_sad = min_sad;\n    }\n    else\n    {\n        encvid->i4_sad += min_sad;\n    }\n\n    return min_cost;\n}\n\nint FindMostProbableI4Mode(AVCCommonObj *video, int blkidx)\n{\n    int dcOnlyPredictionFlag;\n    AVCMacroblock *currMB = video->currMB;\n    int intra4x4PredModeA, intra4x4PredModeB, predIntra4x4PredMode;\n\n\n    dcOnlyPredictionFlag = 0;\n    if (blkidx&0x3)\n    {\n        intra4x4PredModeA = currMB->i4Mode[blkidx-1]; // block to the left\n    }\n    else /* for blk 0, 4, 8, 12 */\n    {\n        if (video->intraAvailA)\n        {\n            if (video->mblock[video->mbAddrA].mbMode == AVC_I4)\n            {\n                intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[blkidx + 3];\n            }\n            else\n            {\n                intra4x4PredModeA = AVC_I4_DC;\n            }\n        }\n        else\n        {\n            dcOnlyPredictionFlag = 1;\n            goto PRED_RESULT_READY;  // skip below\n        }\n    }\n\n    if (blkidx >> 2)\n    {\n        intra4x4PredModeB = currMB->i4Mode[blkidx-4]; // block above\n    }\n    else /* block 0, 1, 2, 3 */\n    {\n        if (video->intraAvailB)\n        {\n            if (video->mblock[video->mbAddrB].mbMode == AVC_I4)\n            {\n                intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[blkidx+12];\n            }\n            else\n            {\n                intra4x4PredModeB = AVC_I4_DC;\n            }\n        }\n        else\n        {\n            dcOnlyPredictionFlag = 1;\n        }\n    }\n\nPRED_RESULT_READY:\n   
 if (dcOnlyPredictionFlag)\n    {\n        intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;\n    }\n\n    predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);\n\n    return predIntra4x4PredMode;\n}\n\nvoid cost_i4(uint8 *org, int org_pitch, uint8 *pred, uint16 *cost)\n{\n    int k;\n    int16 res[16], *pres;\n    int m0, m1, m2, m3, tmp1;\n    int satd = 0;\n\n    pres = res;\n    // horizontal transform\n    k = 4;\n    while (k > 0)\n    {\n        m0 = org[0] - pred[0];\n        m3 = org[3] - pred[3];\n        m0 += m3;\n        m3 = m0 - (m3 << 1);\n        m1 = org[1] - pred[1];\n        m2 = org[2] - pred[2];\n        m1 += m2;\n        m2 = m1 - (m2 << 1);\n        pres[0] = m0 + m1;\n        pres[2] = m0 - m1;\n        pres[1] = m2 + m3;\n        pres[3] = m3 - m2;\n\n        org += org_pitch;\n        pres += 4;\n        pred += 4;\n        k--;\n    }\n    /* vertical transform */\n    pres = res;\n    k = 4;\n    while (k > 0)\n    {\n        m0 = pres[0];\n        m3 = pres[12];\n        m0 += m3;\n        m3 = m0 - (m3 << 1);\n        m1 = pres[4];\n        m2 = pres[8];\n        m1 += m2;\n        m2 = m1 - (m2 << 1);\n        pres[0] = m0 + m1;\n        pres[8] = m0 - m1;\n        pres[4] = m2 + m3;\n        pres[12] = m3 - m2;\n\n        pres++;\n        k--;\n\n    }\n\n    pres = res;\n    k = 4;\n    while (k > 0)\n    {\n        tmp1 = *pres++;\n        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        satd += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        satd += ((tmp1 >= 0) ? 
tmp1 : -tmp1);\n        k--;\n    }\n\n    satd = (satd + 1) >> 1;\n    *cost += satd;\n\n    return ;\n}\n\nvoid chroma_intra_search(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCPictureData *currPic = video->currPic;\n\n    int x_pos = video->mb_x << 3;\n    int y_pos = video->mb_y << 3;\n    int pitch = currPic->pitch >> 1;\n    int offset = y_pos * pitch + x_pos;\n\n    uint8 *comp_ref_x, *comp_ref_y, *pred;\n    int  sum_x0, sum_x1, sum_y0, sum_y1;\n    int pred_0[2], pred_1[2], pred_2[2], pred_3[2];\n    uint32 pred_a, pred_b, pred_c, pred_d;\n    int i, j, component;\n    int a_16, b, c, factor_c, topleft;\n    int H, V, value;\n    uint8 *comp_ref_x0, *comp_ref_x1,  *comp_ref_y0, *comp_ref_y1;\n\n    uint8 *curCb = currPic->Scb + offset;\n    uint8 *curCr = currPic->Scr + offset;\n\n    uint8 *orgCb, *orgCr;\n    AVCFrameIO *currInput = encvid->currInput;\n    AVCMacroblock *currMB = video->currMB;\n    int org_pitch;\n    int cost, mincost;\n\n    /* evaluate DC mode */\n    if (video->intraAvailB & video->intraAvailA)\n    {\n        comp_ref_x = curCb - pitch;\n        comp_ref_y = curCb - 1;\n\n        for (i = 0; i < 2; i++)\n        {\n            pred_a = *((uint32*)comp_ref_x);\n            comp_ref_x += 4;\n            pred_b = (pred_a >> 8) & 0xFF00FF;\n            pred_a &= 0xFF00FF;\n            pred_a += pred_b;\n            pred_a += (pred_a >> 16);\n            sum_x0 = pred_a & 0xFFFF;\n\n            pred_a = *((uint32*)comp_ref_x);\n            pred_b = (pred_a >> 8) & 0xFF00FF;\n            pred_a &= 0xFF00FF;\n            pred_a += pred_b;\n            pred_a += (pred_a >> 16);\n            sum_x1 = pred_a & 0xFFFF;\n\n            pred_1[i] = (sum_x1 + 2) >> 2;\n\n            sum_y0 = *comp_ref_y;\n            sum_y0 += *(comp_ref_y += pitch);\n            sum_y0 += *(comp_ref_y += pitch);\n            sum_y0 += *(comp_ref_y += pitch);\n\n            sum_y1 = *(comp_ref_y += pitch);\n            sum_y1 += 
*(comp_ref_y += pitch);\n            sum_y1 += *(comp_ref_y += pitch);\n            sum_y1 += *(comp_ref_y += pitch);\n\n            pred_2[i] = (sum_y1 + 2) >> 2;\n\n            pred_0[i] = (sum_y0 + sum_x0 + 4) >> 3;\n            pred_3[i] = (sum_y1 + sum_x1 + 4) >> 3;\n\n            comp_ref_x = curCr - pitch;\n            comp_ref_y = curCr - 1;\n        }\n    }\n\n    else if (video->intraAvailA)\n    {\n        comp_ref_y = curCb - 1;\n        for (i = 0; i < 2; i++)\n        {\n            sum_y0 = *comp_ref_y;\n            sum_y0 += *(comp_ref_y += pitch);\n            sum_y0 += *(comp_ref_y += pitch);\n            sum_y0 += *(comp_ref_y += pitch);\n\n            sum_y1 = *(comp_ref_y += pitch);\n            sum_y1 += *(comp_ref_y += pitch);\n            sum_y1 += *(comp_ref_y += pitch);\n            sum_y1 += *(comp_ref_y += pitch);\n\n            pred_0[i] = pred_1[i] = (sum_y0 + 2) >> 2;\n            pred_2[i] = pred_3[i] = (sum_y1 + 2) >> 2;\n\n            comp_ref_y = curCr - 1;\n        }\n    }\n    else if (video->intraAvailB)\n    {\n        comp_ref_x = curCb - pitch;\n        for (i = 0; i < 2; i++)\n        {\n            pred_a = *((uint32*)comp_ref_x);\n            comp_ref_x += 4;\n            pred_b = (pred_a >> 8) & 0xFF00FF;\n            pred_a &= 0xFF00FF;\n            pred_a += pred_b;\n            pred_a += (pred_a >> 16);\n            sum_x0 = pred_a & 0xFFFF;\n\n            pred_a = *((uint32*)comp_ref_x);\n            pred_b = (pred_a >> 8) & 0xFF00FF;\n            pred_a &= 0xFF00FF;\n            pred_a += pred_b;\n            pred_a += (pred_a >> 16);\n            sum_x1 = pred_a & 0xFFFF;\n\n            pred_0[i] = pred_2[i] = (sum_x0 + 2) >> 2;\n            pred_1[i] = pred_3[i] = (sum_x1 + 2) >> 2;\n\n            comp_ref_x = curCr - pitch;\n        }\n    }\n    else\n    {\n        pred_0[0] = pred_0[1] = pred_1[0] = pred_1[1] =\n                                                pred_2[0] = pred_2[1] = pred_3[0] = pred_3[1] = 
128;\n    }\n\n    pred = encvid->pred_ic[AVC_IC_DC];\n\n    pred_a = pred_0[0];\n    pred_b = pred_1[0];\n    pred_a |= (pred_a << 8);\n    pred_a |= (pred_a << 16);\n    pred_b |= (pred_b << 8);\n    pred_b |= (pred_b << 16);\n\n    pred_c = pred_0[1];\n    pred_d = pred_1[1];\n    pred_c |= (pred_c << 8);\n    pred_c |= (pred_c << 16);\n    pred_d |= (pred_d << 8);\n    pred_d |= (pred_d << 16);\n\n\n    for (j = 0; j < 4; j++) /* 4 lines */\n    {\n        *((uint32*)pred) = pred_a;\n        *((uint32*)(pred + 4)) = pred_b;\n        *((uint32*)(pred + 8)) = pred_c;\n        *((uint32*)(pred + 12)) = pred_d;\n        pred += 16; /* move to the next line */\n    }\n\n    pred_a = pred_2[0];\n    pred_b = pred_3[0];\n    pred_a |= (pred_a << 8);\n    pred_a |= (pred_a << 16);\n    pred_b |= (pred_b << 8);\n    pred_b |= (pred_b << 16);\n\n    pred_c = pred_2[1];\n    pred_d = pred_3[1];\n    pred_c |= (pred_c << 8);\n    pred_c |= (pred_c << 16);\n    pred_d |= (pred_d << 8);\n    pred_d |= (pred_d << 16);\n\n    for (j = 0; j < 4; j++) /* 4 lines */\n    {\n        *((uint32*)pred) = pred_a;\n        *((uint32*)(pred + 4)) = pred_b;\n        *((uint32*)(pred + 8)) = pred_c;\n        *((uint32*)(pred + 12)) = pred_d;\n        pred += 16; /* move to the next line */\n    }\n\n    /* predict horizontal mode */\n    if (video->intraAvailA)\n    {\n        comp_ref_y = curCb - 1;\n        comp_ref_x = curCr - 1;\n        pred = encvid->pred_ic[AVC_IC_Horizontal];\n\n        for (i = 4; i < 6; i++)\n        {\n            for (j = 0; j < 4; j++)\n            {\n                pred_a = *comp_ref_y;\n                comp_ref_y += pitch;\n                pred_a |= (pred_a << 8);\n                pred_a |= (pred_a << 16);\n                *((uint32*)pred) = pred_a;\n                *((uint32*)(pred + 4)) = pred_a;\n\n                pred_a = *comp_ref_x;\n                comp_ref_x += pitch;\n                pred_a |= (pred_a << 8);\n                pred_a |= (pred_a << 
16);\n                *((uint32*)(pred + 8)) = pred_a;\n                *((uint32*)(pred + 12)) = pred_a;\n\n                pred += 16;\n            }\n        }\n    }\n\n    /* vertical mode */\n    if (video->intraAvailB)\n    {\n        comp_ref_x = curCb - pitch;\n        comp_ref_y = curCr - pitch;\n        pred = encvid->pred_ic[AVC_IC_Vertical];\n\n        pred_a = *((uint32*)comp_ref_x);\n        pred_b = *((uint32*)(comp_ref_x + 4));\n        pred_c = *((uint32*)comp_ref_y);\n        pred_d = *((uint32*)(comp_ref_y + 4));\n\n        for (j = 0; j < 8; j++)\n        {\n            *((uint32*)pred) = pred_a;\n            *((uint32*)(pred + 4)) = pred_b;\n            *((uint32*)(pred + 8)) = pred_c;\n            *((uint32*)(pred + 12)) = pred_d;\n            pred += 16;\n        }\n    }\n\n    /* Intra_Chroma_Plane */\n    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n    {\n        comp_ref_x = curCb - pitch;\n        comp_ref_y = curCb - 1;\n        topleft = curCb[-pitch-1];\n\n        pred = encvid->pred_ic[AVC_IC_Plane];\n        for (component = 0; component < 2; component++)\n        {\n            H = V = 0;\n            comp_ref_x0 = comp_ref_x + 4;\n            comp_ref_x1 = comp_ref_x + 2;\n            comp_ref_y0 = comp_ref_y + (pitch << 2);\n            comp_ref_y1 = comp_ref_y + (pitch << 1);\n            for (i = 1; i < 4; i++)\n            {\n                H += i * (*comp_ref_x0++ - *comp_ref_x1--);\n                V += i * (*comp_ref_y0 - *comp_ref_y1);\n                comp_ref_y0 += pitch;\n                comp_ref_y1 -= pitch;\n            }\n            H += i * (*comp_ref_x0++ - topleft);\n            V += i * (*comp_ref_y0 - *comp_ref_y1);\n\n            a_16 = ((*(comp_ref_x + 7) + *(comp_ref_y + 7 * pitch)) << 4) + 16;\n            b = (17 * H + 16) >> 5;\n            c = (17 * V + 16) >> 5;\n\n            pred_a = 0;\n            for (i = 4; i < 6; i++)\n            {\n                for (j = 0; j < 4; 
j++)\n                {\n                    factor_c = a_16 + c * (pred_a++ - 3);\n\n                    factor_c -= 3 * b;\n\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b = value;\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 8);\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 16);\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 24);\n                    *((uint32*)pred) = pred_b;\n\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b = value;\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 8);\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 16);\n                    value = factor_c >> 5;\n                    factor_c += b;\n                    CLIP_RESULT(value)\n                    pred_b |= (value << 24);\n                    *((uint32*)(pred + 4)) = pred_b;\n                    pred += 16;\n                }\n            }\n\n            pred -= 120; /* point to cr */\n            comp_ref_x = curCr - pitch;\n            comp_ref_y = curCr - 1;\n            topleft = curCr[-pitch-1];\n        }\n    }\n\n    /* now evaluate it */\n\n    org_pitch = (currInput->pitch) >> 1;\n    offset = x_pos + y_pos * org_pitch;\n\n    orgCb = currInput->YCbCr[1] + offset;\n    orgCr = currInput->YCbCr[2] + offset;\n\n    
mincost = 0x7fffffff;\n    cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_DC], mincost);\n    if (cost < mincost)\n    {\n        mincost = cost;\n        currMB->intra_chroma_pred_mode = AVC_IC_DC;\n    }\n\n    if (video->intraAvailA)\n    {\n        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Horizontal], mincost);\n        if (cost < mincost)\n        {\n            mincost = cost;\n            currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;\n        }\n    }\n\n    if (video->intraAvailB)\n    {\n        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Vertical], mincost);\n        if (cost < mincost)\n        {\n            mincost = cost;\n            currMB->intra_chroma_pred_mode = AVC_IC_Vertical;\n        }\n    }\n\n    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n    {\n        cost = SATDChroma(orgCb, orgCr, org_pitch, encvid->pred_ic[AVC_IC_Plane], mincost);\n        if (cost < mincost)\n        {\n            mincost = cost;\n            currMB->intra_chroma_pred_mode = AVC_IC_Plane;\n        }\n    }\n\n\n    return ;\n}\n\n\nint SATDChroma(uint8 *orgCb, uint8 *orgCr, int org_pitch, uint8 *pred, int min_cost)\n{\n    int cost;\n    /* first take difference between orgCb, orgCr and pred */\n    int16 res[128], *pres; // residue\n    int m0, m1, m2, m3, tmp1;\n    int j, k;\n\n    pres = res;\n    org_pitch -= 8;\n    // horizontal transform\n    for (j = 0; j < 8; j++)\n    {\n        k = 2;\n        while (k > 0)\n        {\n            m0 = orgCb[0] - pred[0];\n            m3 = orgCb[3] - pred[3];\n            m0 += m3;\n            m3 = m0 - (m3 << 1);\n            m1 = orgCb[1] - pred[1];\n            m2 = orgCb[2] - pred[2];\n            m1 += m2;\n            m2 = m1 - (m2 << 1);\n            pres[0] = m0 + m1;\n            pres[2] = m0 - m1;\n            pres[1] = m2 + m3;\n            pres[3] = m3 - m2;\n\n            orgCb += 4;\n            pres += 4;\n   
         pred += 4;\n            k--;\n        }\n        orgCb += org_pitch;\n        k = 2;\n        while (k > 0)\n        {\n            m0 = orgCr[0] - pred[0];\n            m3 = orgCr[3] - pred[3];\n            m0 += m3;\n            m3 = m0 - (m3 << 1);\n            m1 = orgCr[1] - pred[1];\n            m2 = orgCr[2] - pred[2];\n            m1 += m2;\n            m2 = m1 - (m2 << 1);\n            pres[0] = m0 + m1;\n            pres[2] = m0 - m1;\n            pres[1] = m2 + m3;\n            pres[3] = m3 - m2;\n\n            orgCr += 4;\n            pres += 4;\n            pred += 4;\n            k--;\n        }\n        orgCr += org_pitch;\n    }\n\n    /* vertical transform */\n    for (j = 0; j < 2; j++)\n    {\n        pres = res + (j << 6);\n        k = 16;\n        while (k > 0)\n        {\n            m0 = pres[0];\n            m3 = pres[3<<4];\n            m0 += m3;\n            m3 = m0 - (m3 << 1);\n            m1 = pres[1<<4];\n            m2 = pres[2<<4];\n            m1 += m2;\n            m2 = m1 - (m2 << 1);\n            pres[0] = m0 + m1;\n            pres[2<<4] = m0 - m1;\n            pres[1<<4] = m2 + m3;\n            pres[3<<4] = m3 - m2;\n\n            pres++;\n            k--;\n        }\n    }\n\n    /* now sum of absolute value */\n    pres = res;\n    cost = 0;\n    k = 128;\n    while (k > 0)\n    {\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? tmp1 : -tmp1);\n        tmp1 = *pres++;\n        cost += ((tmp1 >= 0) ? 
tmp1 : -tmp1);\n        k -= 8;\n        if (cost > min_cost) /* early drop out */\n        {\n            return cost;\n        }\n    }\n\n    return cost;\n}\n\n\n\n///////////////////////////////// old code, unused\n/* find the best intra mode based on original (unencoded) frame */\n/* output is\n    currMB->mb_intra, currMB->mbMode,\n    currMB->i16Mode  (if currMB->mbMode == AVC_I16)\n    currMB->i4Mode[..] (if currMB->mbMode == AVC_I4) */\n\n#ifdef FIXED_INTRAPRED_MODE\nvoid MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)\n{\n    (void)(mbNum);\n\n    AVCCommonObj *video = encvid->common;\n    int indx, block_x, block_y;\n\n    video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;\n\n    if (!video->currPicParams->constrained_intra_pred_flag)\n    {\n        video->intraAvailA = video->mbAvailA;\n        video->intraAvailB = video->mbAvailB;\n        video->intraAvailC = video->mbAvailC;\n        video->intraAvailD = video->mbAvailD;\n    }\n    else\n    {\n        if (video->mbAvailA)\n        {\n            video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;\n        }\n        if (video->mbAvailB)\n        {\n            video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;\n        }\n        if (video->mbAvailC)\n        {\n            video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;\n        }\n        if (video->mbAvailD)\n        {\n            video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;\n        }\n    }\n\n    currMB->mb_intra = TRUE;\n    currMB->mbMode = FIXED_INTRAPRED_MODE;\n\n    if (currMB->mbMode == AVC_I16)\n    {\n        currMB->i16Mode = FIXED_I16_MODE;\n\n        if (FIXED_I16_MODE == AVC_I16_Vertical && !video->intraAvailB)\n        {\n            currMB->i16Mode = AVC_I16_DC;\n        }\n\n        if (FIXED_I16_MODE == AVC_I16_Horizontal && !video->intraAvailA)\n        {\n            currMB->i16Mode = AVC_I16_DC;\n        }\n\n       
 if (FIXED_I16_MODE == AVC_I16_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))\n        {\n            currMB->i16Mode = AVC_I16_DC;\n        }\n    }\n    else //if(currMB->mbMode == AVC_I4)\n    {\n        for (indx = 0; indx < 16; indx++)\n        {\n            block_x = blkIdx2blkX[indx];\n            block_y = blkIdx2blkY[indx];\n\n            currMB->i4Mode[(block_y<<2)+block_x] = FIXED_I4_MODE;\n\n            if (FIXED_I4_MODE == AVC_I4_Vertical && !(block_y > 0 || video->intraAvailB))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Horizontal && !(block_x || video->intraAvailA))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Left &&\n                    (block_y == 0 && !video->intraAvailB))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Diagonal_Down_Right &&\n                    !((block_y && block_x)\n                      || (block_y && video->intraAvailA)\n                      || (block_x && video->intraAvailB)\n                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Vertical_Right &&\n                    !((block_y && block_x)\n                      || (block_y && video->intraAvailA)\n                      || (block_x && video->intraAvailB)\n                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Horizontal_Down &&\n                    !((block_y && block_x)\n                      || 
(block_y && video->intraAvailA)\n                      || (block_x && video->intraAvailB)\n                      || (video->intraAvailA && video->intraAvailD && video->intraAvailB)))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Vertical_Left &&\n                    (block_y == 0 && !video->intraAvailB))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n\n            if (FIXED_I4_MODE == AVC_I4_Horizontal_Up && !(block_x || video->intraAvailA))\n            {\n                currMB->i4Mode[(block_y<<2)+block_x] = AVC_I4_DC;\n            }\n        }\n    }\n\n    currMB->intra_chroma_pred_mode = FIXED_INTRA_CHROMA_MODE;\n\n    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Horizontal && !(video->intraAvailA))\n    {\n        currMB->intra_chroma_pred_mode = AVC_IC_DC;\n    }\n\n    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Vertical && !(video->intraAvailB))\n    {\n        currMB->intra_chroma_pred_mode = AVC_IC_DC;\n    }\n\n    if (FIXED_INTRA_CHROMA_MODE == AVC_IC_Plane && !(video->intraAvailA && video->intraAvailB && video->intraAvailD))\n    {\n        currMB->intra_chroma_pred_mode = AVC_IC_DC;\n    }\n\n    /* also reset the motion vectors */\n    /* set MV and Ref_Idx codes of Intra blocks in P-slices */\n    oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);\n    currMB->ref_idx_L0[0] = -1;\n    currMB->ref_idx_L0[1] = -1;\n    currMB->ref_idx_L0[2] = -1;\n    currMB->ref_idx_L0[3] = -1;\n\n    // output from this function, currMB->mbMode should be set to either\n    // AVC_I4, AVC_I16, or else in AVCMBMode enum, mbType, mb_intra, intra_chroma_pred_mode */\n    return ;\n}\n#else // faster combined prediction+SAD calculation\nvoid MBIntraSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCFrameIO *currInput = encvid->currInput;\n    uint8 *curL, *curCb, *curCr;\n    
uint8 *comp, *pred_block;\n    int block_x, block_y, offset;\n    uint sad, sad4, sadI4, sadI16;\n    int component, SubBlock_indx, temp;\n    int pitch = video->currPic->pitch;\n\n    /* calculate the cost of each intra prediction mode  and compare to the\n    inter mode */\n    /* full search for all intra prediction */\n    offset = (video->mb_y << 4) * pitch + (video->mb_x << 4);\n    curL = currInput->YCbCr[0] + offset;\n    pred_block = video->pred_block + 84;\n\n    /* Assuming that InitNeighborAvailability has been called prior to this function */\n    video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;\n\n    if (!video->currPicParams->constrained_intra_pred_flag)\n    {\n        video->intraAvailA = video->mbAvailA;\n        video->intraAvailB = video->mbAvailB;\n        video->intraAvailC = video->mbAvailC;\n        video->intraAvailD = video->mbAvailD;\n    }\n    else\n    {\n        if (video->mbAvailA)\n        {\n            video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;\n        }\n        if (video->mbAvailB)\n        {\n            video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;\n        }\n        if (video->mbAvailC)\n        {\n            video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;\n        }\n        if (video->mbAvailD)\n        {\n            video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;\n        }\n    }\n\n    /* currently we're doing exhaustive search. 
Smart search will be used later */\n\n    /* I16 modes */\n    curL = currInput->YCbCr[0] + offset;\n    video->pintra_pred_top = curL - pitch;\n    video->pintra_pred_left = curL - 1;\n    if (video->mb_y)\n    {\n        video->intra_pred_topleft = *(curL - pitch - 1);\n    }\n\n    /* Intra_16x16_Vertical */\n    sadI16 = 65536;\n    /* check availability of top */\n    if (video->intraAvailB)\n    {\n        sad = SAD_I16_Vert(video, curL, sadI16);\n\n        if (sad < sadI16)\n        {\n            sadI16 = sad;\n            currMB->i16Mode = AVC_I16_Vertical;\n        }\n    }\n    /* Intra_16x16_Horizontal */\n    /* check availability of left */\n    if (video->intraAvailA)\n    {\n        sad = SAD_I16_HorzDC(video, curL, AVC_I16_Horizontal, sadI16);\n\n        if (sad < sadI16)\n        {\n            sadI16 = sad;\n            currMB->i16Mode = AVC_I16_Horizontal;\n        }\n    }\n\n    /* Intra_16x16_DC, default mode */\n    sad = SAD_I16_HorzDC(video, curL, AVC_I16_DC, sadI16);\n    if (sad < sadI16)\n    {\n        sadI16 = sad;\n        currMB->i16Mode = AVC_I16_DC;\n    }\n\n    /* Intra_16x16_Plane */\n    if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n    {\n        sad = SAD_I16_Plane(video, curL, sadI16);\n\n        if (sad < sadI16)\n        {\n            sadI16 = sad;\n            currMB->i16Mode = AVC_I16_Plane;\n        }\n    }\n\n    sadI16 >>= 1;  /* before comparison */\n\n    /* selection between intra4, intra16 or inter mode */\n    if (sadI16 < encvid->min_cost)\n    {\n        currMB->mb_intra = TRUE;\n        currMB->mbMode = AVC_I16;\n        encvid->min_cost = sadI16;\n    }\n\n    if (currMB->mb_intra) /* only do the chrominance search when intra is decided */\n    {\n        /* Note that we might be able to guess the type of prediction from\n        the luma prediction type */\n\n        /* now search for the best chroma intra prediction */\n        offset = (offset >> 2) + (video->mb_x << 2);\n        
curCb = currInput->YCbCr[1] + offset;\n        curCr = currInput->YCbCr[2] + offset;\n\n        pitch >>= 1;\n        video->pintra_pred_top_cb = curCb - pitch;\n        video->pintra_pred_left_cb = curCb - 1;\n        video->pintra_pred_top_cr = curCr - pitch;\n        video->pintra_pred_left_cr = curCr - 1;\n\n        if (video->mb_y)\n        {\n            video->intra_pred_topleft_cb = *(curCb - pitch - 1);\n            video->intra_pred_topleft_cr = *(curCr - pitch - 1);\n        }\n\n        /* Intra_Chroma_DC */\n        sad4 = SAD_Chroma_DC(video, curCb, curCr, 65536);\n        currMB->intra_chroma_pred_mode = AVC_IC_DC;\n\n        /* Intra_Chroma_Horizontal */\n        if (video->intraAvailA)\n        {\n            /* check availability of left */\n            sad = SAD_Chroma_Horz(video, curCb, curCr, sad4);\n            if (sad < sad4)\n            {\n                sad4 = sad;\n                currMB->intra_chroma_pred_mode = AVC_IC_Horizontal;\n            }\n        }\n\n        /* Intra_Chroma_Vertical */\n        if (video->intraAvailB)\n        {\n            /* check availability of top */\n            sad = SAD_Chroma_Vert(video, curCb, curCr, sad4);\n\n            if (sad < sad4)\n            {\n                sad4 = sad;\n                currMB->intra_chroma_pred_mode = AVC_IC_Vertical;\n            }\n        }\n\n        /* Intra_Chroma_Plane */\n        if (video->intraAvailA && video->intraAvailB && video->intraAvailD)\n        {\n            /* check availability of top and left */\n            Intra_Chroma_Plane(video, pitch);\n\n            sad = SADChroma(pred_block + 452, curCb, curCr, pitch);\n\n            if (sad < sad4)\n            {\n                sad4 = sad;\n                currMB->intra_chroma_pred_mode = AVC_IC_Plane;\n            }\n        }\n\n        /* also reset the motion vectors */\n        /* set MV and Ref_Idx codes of Intra blocks in P-slices */\n        oscl_memset(currMB->mvL0, 0, sizeof(int32)*16);\n       
 oscl_memset(currMB->ref_idx_L0, -1, sizeof(int16)*4);\n\n    }\n\n    // output from this function, currMB->mbMode should be set to either\n    // AVC_I4, AVC_I16, or else in AVCMBMode enum, mbType, mb_intra, intra_chroma_pred_mode */\n\n    return ;\n}\n#endif\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/motion_comp.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"avcenc_int.h\"\n#include \"oscl_mem.h\"\n\n\n#define CLIP_RESULT(x)      if((uint)x > 0xFF){ \\\n                 x = 0xFF & (~(x>>31));}\n\n/* (blkwidth << 2) + (dy << 1) + dx */\nstatic void (*const eChromaMC_SIMD[8])(uint8 *, int , int , int , uint8 *, int, int , int) =\n{\n    &eChromaFullMC_SIMD,\n    &eChromaHorizontalMC_SIMD,\n    &eChromaVerticalMC_SIMD,\n    &eChromaDiagonalMC_SIMD,\n    &eChromaFullMC_SIMD,\n    &eChromaHorizontalMC2_SIMD,\n    &eChromaVerticalMC2_SIMD,\n    &eChromaDiagonalMC2_SIMD\n};\n/* Perform motion prediction and compensation with residue if exist. 
*/\nvoid AVCMBMotionComp(AVCEncObject *encvid, AVCCommonObj *video)\n{\n    (void)(encvid);\n\n    AVCMacroblock *currMB = video->currMB;\n    AVCPictureData *currPic = video->currPic;\n    int mbPartIdx, subMbPartIdx;\n    int ref_idx;\n    int offset_MbPart_indx = 0;\n    int16 *mv;\n    uint32 x_pos, y_pos;\n    uint8 *curL, *curCb, *curCr;\n    uint8 *ref_l, *ref_Cb, *ref_Cr;\n    uint8 *predBlock, *predCb, *predCr;\n    int block_x, block_y, offset_x, offset_y, offsetP, offset;\n    int x_position = (video->mb_x << 4);\n    int y_position = (video->mb_y << 4);\n    int MbHeight, MbWidth, mbPartIdx_X, mbPartIdx_Y, offset_indx;\n    int picWidth = currPic->width;\n    int picPitch = currPic->pitch;\n    int picHeight = currPic->height;\n    uint32 tmp_word;\n\n    tmp_word = y_position * picPitch;\n    curL = currPic->Sl + tmp_word + x_position;\n    offset = (tmp_word >> 2) + (x_position >> 1);\n    curCb = currPic->Scb + offset;\n    curCr = currPic->Scr + offset;\n\n    predBlock = curL;\n    predCb = curCb;\n    predCr = curCr;\n\n    GetMotionVectorPredictor(video, 1);\n\n    for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n    {\n        MbHeight = currMB->SubMbPartHeight[mbPartIdx];\n        MbWidth = currMB->SubMbPartWidth[mbPartIdx];\n        mbPartIdx_X = ((mbPartIdx + offset_MbPart_indx) & 1);\n        mbPartIdx_Y = (mbPartIdx + offset_MbPart_indx) >> 1;\n        ref_idx = currMB->ref_idx_L0[(mbPartIdx_Y << 1) + mbPartIdx_X];\n        offset_indx = 0;\n\n        ref_l = video->RefPicList0[ref_idx]->Sl;\n        ref_Cb = video->RefPicList0[ref_idx]->Scb;\n        ref_Cr = video->RefPicList0[ref_idx]->Scr;\n\n        for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n        {\n            block_x = (mbPartIdx_X << 1) + ((subMbPartIdx + offset_indx) & 1);\n            block_y = (mbPartIdx_Y << 1) + (((subMbPartIdx + offset_indx) >> 1) & 1);\n            mv = (int16*)(currMB->mvL0 + block_x + (block_y 
<< 2));\n            offset_x = x_position + (block_x << 2);\n            offset_y = y_position + (block_y << 2);\n            x_pos = (offset_x << 2) + *mv++;   /*quarter pel */\n            y_pos = (offset_y << 2) + *mv;   /*quarter pel */\n\n            //offset = offset_y * currPic->width;\n            //offsetC = (offset >> 2) + (offset_x >> 1);\n            offsetP = (block_y << 2) * picPitch + (block_x << 2);\n            eLumaMotionComp(ref_l, picPitch, picHeight, x_pos, y_pos,\n                            /*comp_Sl + offset + offset_x,*/\n                            predBlock + offsetP, picPitch, MbWidth, MbHeight);\n\n            offsetP = (block_y * picWidth) + (block_x << 1);\n            eChromaMotionComp(ref_Cb, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                              /*comp_Scb +  offsetC,*/\n                              predCb + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);\n            eChromaMotionComp(ref_Cr, picWidth >> 1, picHeight >> 1, x_pos, y_pos,\n                              /*comp_Scr +  offsetC,*/\n                              predCr + offsetP, picPitch >> 1, MbWidth >> 1, MbHeight >> 1);\n\n            offset_indx = currMB->SubMbPartWidth[mbPartIdx] >> 3;\n        }\n        offset_MbPart_indx = currMB->MbPartWidth >> 4;\n    }\n\n    return ;\n}\n\n\n/* preform the actual  motion comp here */\nvoid eLumaMotionComp(uint8 *ref, int picpitch, int picheight,\n                     int x_pos, int y_pos,\n                     uint8 *pred, int pred_pitch,\n                     int blkwidth, int blkheight)\n{\n    (void)(picheight);\n\n    int dx, dy;\n    int temp2[21][21]; /* for intermediate results */\n    uint8 *ref2;\n\n    dx = x_pos & 3;\n    dy = y_pos & 3;\n    x_pos = x_pos >> 2;  /* round it to full-pel resolution */\n    y_pos = y_pos >> 2;\n\n    /* perform actual motion compensation */\n    if (dx == 0 && dy == 0)\n    {  /* fullpel position *//* G */\n\n        ref += y_pos * picpitch + x_pos;\n\n  
      eFullPelMC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight);\n\n    }   /* other positions */\n    else  if (dy == 0)\n    { /* no vertical interpolation *//* a,b,c*/\n\n        ref += y_pos * picpitch + x_pos;\n\n        eHorzInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dx);\n    }\n    else if (dx == 0)\n    { /*no horizontal interpolation *//* d,h,n */\n\n        ref += y_pos * picpitch + x_pos;\n\n        eVertInterp1MC(ref, picpitch, pred, pred_pitch, blkwidth, blkheight, dy);\n    }\n    else if (dy == 2)\n    {  /* horizontal cross *//* i, j, k */\n\n        ref += y_pos * picpitch + x_pos - 2; /* move to the left 2 pixels */\n\n        eVertInterp2MC(ref, picpitch, &temp2[0][0], 21, blkwidth + 5, blkheight);\n\n        eHorzInterp2MC(&temp2[0][2], 21, pred, pred_pitch, blkwidth, blkheight, dx);\n    }\n    else if (dx == 2)\n    { /* vertical cross */ /* f,q */\n\n        ref += (y_pos - 2) * picpitch + x_pos; /* move to up 2 lines */\n\n        eHorzInterp3MC(ref, picpitch, &temp2[0][0], 21, blkwidth, blkheight + 5);\n        eVertInterp3MC(&temp2[2][0], 21, pred, pred_pitch, blkwidth, blkheight, dy);\n    }\n    else\n    { /* diagonal *//* e,g,p,r */\n\n        ref2 = ref + (y_pos + (dy / 2)) * picpitch + x_pos;\n\n        ref += (y_pos * picpitch) + x_pos + (dx / 2);\n\n        eDiagonalInterpMC(ref2, ref, picpitch, pred, pred_pitch, blkwidth, blkheight);\n    }\n\n    return ;\n}\n\nvoid eCreateAlign(uint8 *ref, int picpitch, int y_pos,\n                  uint8 *out, int blkwidth, int blkheight)\n{\n    int i, j;\n    int offset, out_offset;\n    uint32 prev_pix, result, pix1, pix2, pix4;\n\n    ref += y_pos * picpitch;// + x_pos;\n    out_offset = 24 - blkwidth;\n\n    //switch(x_pos&0x3){\n    switch (((uint32)ref)&0x3)\n    {\n        case 1:\n            offset =  picpitch - blkwidth - 3;\n            for (j = 0; j < blkheight; j++)\n            {\n                pix1 = *ref++;\n                pix2 = 
*((uint16*)ref);\n                ref += 2;\n                result = (pix2 << 8) | pix1;\n\n                for (i = 3; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 24) & 0xFF000000; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n                    result = pix4 >> 8; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n        case 2:\n            offset =  picpitch - blkwidth - 2;\n            for (j = 0; j < blkheight; j++)\n            {\n                result = *((uint16*)ref);\n                ref += 2;\n                for (i = 2; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 16) & 0xFFFF0000; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n                    result = pix4 >> 16; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n        case 3:\n            offset =  picpitch - blkwidth - 1;\n            for (j = 0; j < blkheight; j++)\n            {\n                result = *ref++;\n                for (i = 1; i < blkwidth; i += 4)\n                {\n                    pix4 = *((uint32*)ref);\n                    ref += 4;\n                    prev_pix = (pix4 << 8) & 0xFFFFFF00; /* mask out byte belong to previous word */\n                    result |= prev_pix;\n                    *((uint32*)out) = result;  /* write 4 bytes */\n                    out += 4;\n            
        result = pix4 >> 24; /* for the next loop */\n                }\n                ref += offset;\n                out += out_offset;\n            }\n            break;\n    }\n}\n\nvoid eHorzInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                    int blkwidth, int blkheight, int dx)\n{\n    uint8 *p_ref;\n    uint32 *p_cur;\n    uint32 tmp, pkres;\n    int result, curr_offset, ref_offset;\n    int j;\n    int32 r0, r1, r2, r3, r4, r5;\n    int32 r13, r6;\n\n    p_cur = (uint32*)out; /* assume it's word aligned */\n    curr_offset = (outpitch - blkwidth) >> 2;\n    p_ref = in;\n    ref_offset = inpitch - blkwidth;\n\n    if (dx&1)\n    {\n        dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */\n        p_ref -= 2;\n        r13 = 0;\n        for (j = blkheight; j > 0; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            r0 = p_ref[0];\n            r1 = p_ref[2];\n            r0 |= (r1 << 16);           /* 0,c,0,a */\n            r1 = p_ref[1];\n            r2 = p_ref[3];\n            r1 |= (r2 << 16);           /* 0,d,0,b */\n            while ((uint32)p_ref < tmp)\n            {\n                r2 = *(p_ref += 4); /* move pointer to e */\n                r3 = p_ref[2];\n                r2 |= (r3 << 16);           /* 0,g,0,e */\n                r3 = p_ref[1];\n                r4 = p_ref[3];\n                r3 |= (r4 << 16);           /* 0,h,0,f */\n\n                r4 = r0 + r3;       /* c+h, a+f */\n                r5 = r0 + r1;   /* c+d, a+b */\n                r6 = r2 + r3;   /* g+h, e+f */\n                r5 >>= 16;\n                r5 |= (r6 << 16);   /* e+f, c+d */\n                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n                r4 += 0x100010; /* +16, +16 */\n                r5 = r1 + r2;       /* d+g, b+e */\n                r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n                r4 >>= 5;\n                r13 |= r4;      /* check clipping 
*/\n\n                r5 = p_ref[dx+2];\n                r6 = p_ref[dx+4];\n                r5 |= (r6 << 16);\n                r4 += r5;\n                r4 += 0x10001;\n                r4 = (r4 >> 1) & 0xFF00FF;\n\n                r5 = p_ref[4];  /* i */\n                r6 = (r5 << 16);\n                r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n                r5 += r1;       /* d+i, b+g */ /* r5 not free */\n                r1 >>= 16;\n                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */\n                r1 += r2;       /* f+g, d+e */\n                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n                r0 >>= 16;\n                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n                r0 += r3;       /* e+h, c+f */\n                r5 += 0x100010; /* 16,16 */\n                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n                r5 >>= 5;\n                r13 |= r5;      /* check clipping */\n\n                r0 = p_ref[dx+3];\n                r1 = p_ref[dx+5];\n                r0 |= (r1 << 16);\n                r5 += r0;\n                r5 += 0x10001;\n                r5 = (r5 >> 1) & 0xFF00FF;\n\n                r4 |= (r5 << 8);    /* pack them together */\n                *p_cur++ = r4;\n                r1 = r3;\n                r0 = r2;\n            }\n            p_cur += curr_offset; /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n            if (r13&0xFF000700) /* need clipping */\n            {\n                /* move back to the beginning of the line */\n                p_ref -= (ref_offset + blkwidth);   /* input */\n                p_cur -= (outpitch >> 2);\n\n                tmp = (uint32)(p_ref + blkwidth);\n                for (; (uint32)p_ref < tmp;)\n                {\n\n                    r0 = *p_ref++;\n                    r1 = *p_ref++;\n                    r2 = *p_ref++;\n                    r3 = *p_ref++;\n              
      r4 = *p_ref++;\n                    /* first pixel */\n                    r5 = *p_ref++;\n                    result = (r0 + r5);\n                    r0 = (r1 + r4);\n                    result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    pkres = (result >> 1) ;\n                    /* second pixel */\n                    r0 = *p_ref++;\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result >> 1);\n                    pkres  |= (result << 8);\n                    /* third pixel */\n                    r1 = *p_ref++;\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result >> 1);\n                    pkres  |= (result << 16);\n                    /* fourth pixel */\n                    r2 = *p_ref++;\n                    
result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    /* 3/4 pel,  no need to clip */\n                    result = (result + p_ref[dx] + 1);\n                    result = (result >> 1);\n                    pkres  |= (result << 24);\n                    *p_cur++ = pkres; /* write 4 pixels */\n                    p_ref -= 5;  /* offset back to the middle of filter */\n                }\n                p_cur += curr_offset;  /* move to the next line */\n                p_ref += ref_offset;    /* move to the next line */\n            }\n        }\n    }\n    else\n    {\n        p_ref -= 2;\n        r13 = 0;\n        for (j = blkheight; j > 0; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            r0 = p_ref[0];\n            r1 = p_ref[2];\n            r0 |= (r1 << 16);           /* 0,c,0,a */\n            r1 = p_ref[1];\n            r2 = p_ref[3];\n            r1 |= (r2 << 16);           /* 0,d,0,b */\n            while ((uint32)p_ref < tmp)\n            {\n                r2 = *(p_ref += 4); /* move pointer to e */\n                r3 = p_ref[2];\n                r2 |= (r3 << 16);           /* 0,g,0,e */\n                r3 = p_ref[1];\n                r4 = p_ref[3];\n                r3 |= (r4 << 16);           /* 0,h,0,f */\n\n                r4 = r0 + r3;       /* c+h, a+f */\n                r5 = r0 + r1;   /* c+d, a+b */\n                r6 = r2 + r3;   /* g+h, e+f */\n                r5 >>= 16;\n                r5 |= (r6 << 16);   /* e+f, c+d */\n                r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n                r4 += 0x100010; /* +16, +16 */\n                r5 = r1 + r2;       /* d+g, b+e */\n                
r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n                r4 >>= 5;\n                r13 |= r4;      /* check clipping */\n                r4 &= 0xFF00FF; /* mask */\n\n                r5 = p_ref[4];  /* i */\n                r6 = (r5 << 16);\n                r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n                r5 += r1;       /* d+i, b+g */ /* r5 not free */\n                r1 >>= 16;\n                r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */\n                r1 += r2;       /* f+g, d+e */\n                r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n                r0 >>= 16;\n                r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n                r0 += r3;       /* e+h, c+f */\n                r5 += 0x100010; /* 16,16 */\n                r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n                r5 >>= 5;\n                r13 |= r5;      /* check clipping */\n                r5 &= 0xFF00FF; /* mask */\n\n                r4 |= (r5 << 8);    /* pack them together */\n                *p_cur++ = r4;\n                r1 = r3;\n                r0 = r2;\n            }\n            p_cur += curr_offset; /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n            if (r13&0xFF000700) /* need clipping */\n            {\n                /* move back to the beginning of the line */\n                p_ref -= (ref_offset + blkwidth);   /* input */\n                p_cur -= (outpitch >> 2);\n\n                tmp = (uint32)(p_ref + blkwidth);\n                for (; (uint32)p_ref < tmp;)\n                {\n\n                    r0 = *p_ref++;\n                    r1 = *p_ref++;\n                    r2 = *p_ref++;\n                    r3 = *p_ref++;\n                    r4 = *p_ref++;\n                    /* first pixel */\n                    r5 = *p_ref++;\n                    result = (r0 + r5);\n                    r0 = (r1 + r4);\n        
            result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  = result;\n                    /* second pixel */\n                    r0 = *p_ref++;\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 8);\n                    /* third pixel */\n                    r1 = *p_ref++;\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 16);\n                    /* fourth pixel */\n                    r2 = *p_ref++;\n                    result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    pkres  |= (result << 24);\n                    *p_cur++ = pkres;   /* write 4 pixels */\n                    p_ref -= 5;\n                }\n                p_cur += curr_offset; /* move to the next line */\n                p_ref += 
ref_offset;\n            }\n        }\n    }\n\n    return ;\n}\n\nvoid eHorzInterp2MC(int *in, int inpitch, uint8 *out, int outpitch,\n                    int blkwidth, int blkheight, int dx)\n{\n    int *p_ref;\n    uint32 *p_cur;\n    uint32 tmp, pkres;\n    int result, result2, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = (uint32*)out; /* assume it's word aligned */\n    curr_offset = (outpitch - blkwidth) >> 2;\n    p_ref = in;\n    ref_offset = inpitch - blkwidth;\n\n    if (dx&1)\n    {\n        dx = ((dx >> 1) ? -3 : -4); /* use in 3/4 pel */\n\n        for (j = blkheight; j > 0 ; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n            for (; (uint32)p_ref < tmp;)\n            {\n\n                r0 = p_ref[-2];\n                r1 = p_ref[-1];\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                pkres = (result >> 1);\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) 
>> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dx] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                pkres  |= (result << 24);\n                *p_cur++ = pkres; /* write 4 pixels */\n                p_ref -= 3;  /* offset back to the middle of filter */\n            }\n            p_cur += curr_offset;  /* move to the next line */\n            p_ref += ref_offset;    /* move to the next line */\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0 ; j--)\n        {\n            tmp = (uint32)(p_ref + blkwidth);\n    
        for (; (uint32)p_ref < tmp;)\n            {\n\n                r0 = p_ref[-2];\n                r1 = p_ref[-1];\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  = result;\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                pkres  |= (result 
<< 24);\n                *p_cur++ = pkres; /* write 4 pixels */\n                p_ref -= 3;  /* offset back to the middle of filter */\n            }\n            p_cur += curr_offset;  /* move to the next line */\n            p_ref += ref_offset;    /* move to the next line */\n        }\n    }\n\n    return ;\n}\n\nvoid eHorzInterp3MC(uint8 *in, int inpitch, int *out, int outpitch,\n                    int blkwidth, int blkheight)\n{\n    uint8 *p_ref;\n    int   *p_cur;\n    uint32 tmp;\n    int result, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = (outpitch - blkwidth);\n    p_ref = in;\n    ref_offset = inpitch - blkwidth;\n\n    for (j = blkheight; j > 0 ; j--)\n    {\n        tmp = (uint32)(p_ref + blkwidth);\n        for (; (uint32)p_ref < tmp;)\n        {\n\n            r0 = p_ref[-2];\n            r1 = p_ref[-1];\n            r2 = *p_ref++;\n            r3 = *p_ref++;\n            r4 = *p_ref++;\n            /* first pixel */\n            r5 = *p_ref++;\n            result = (r0 + r5);\n            r0 = (r1 + r4);\n            result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n            r0 = (r2 + r3);\n            result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n            *p_cur++ = result;\n            /* second pixel */\n            r0 = *p_ref++;\n            result = (r1 + r0);\n            r1 = (r2 + r5);\n            result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n            r1 = (r3 + r4);\n            result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n            *p_cur++ = result;\n            /* third pixel */\n            r1 = *p_ref++;\n            result = (r2 + r1);\n            r2 = (r3 + r0);\n            result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n            r2 = (r4 + r5);\n            result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n            *p_cur++ = result;\n            /* fourth pixel */\n            
r2 = *p_ref++;\n            result = (r3 + r2);\n            r3 = (r4 + r1);\n            result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n            r3 = (r5 + r0);\n            result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n            *p_cur++ = result;\n            p_ref -= 3; /* move back to the middle of the filter */\n        }\n        p_cur += curr_offset; /* move to the next line */\n        p_ref += ref_offset;\n    }\n\n    return ;\n}\nvoid eVertInterp1MC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                    int blkwidth, int blkheight, int dy)\n{\n    uint8 *p_cur, *p_ref;\n    uint32 tmp;\n    int result, curr_offset, ref_offset;\n    int j, i;\n    int32 r0, r1, r2, r3, r4, r5, r6, r7, r8, r13;\n    uint8  tmp_in[24][24];\n\n    /* not word-aligned */\n    if (((uint32)in)&0x3)\n    {\n        eCreateAlign(in, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);\n        in = &tmp_in[2][0];\n        inpitch = 24;\n    }\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    ref_offset = blkheight * inpitch; /* for limit */\n\n    curr_offset += 3;\n\n    if (dy&1)\n    {\n        dy = (dy >> 1) ? 
0 : -inpitch;\n\n        for (j = 0; j < blkwidth; j += 4, in += 4)\n        {\n            r13 = 0;\n            p_ref = in;\n            p_cur -= outpitch;  /* compensate for the first offset */\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n            {\n                r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n                p_ref += inpitch;\n                r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n                r0 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n\n                r0 += r1;\n                r6 += r7;\n\n                r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 += 20 * r1;\n                r6 += 20 * r7;\n                r0 += 0x100010;\n                r6 += 0x100010;\n\n                r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 -= 5 * r1;\n                r6 -= 5 * r7;\n\n                r0 >>= 5;\n                r6 >>= 5;\n                /* clip */\n                r13 |= r6;\n                r13 |= r0;\n                //CLIPPACK(r6,result)\n\n                r1 = *((uint32*)(p_ref + dy));\n                r2 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 
0xFF00FF;\n                r0 += r1;\n                r6 += r2;\n                r0 += 0x10001;\n                r6 += 0x10001;\n                r0 = (r0 >> 1) & 0xFF00FF;\n                r6 = (r6 >> 1) & 0xFF00FF;\n\n                r0 |= (r6 << 8);  /* pack it back */\n                *((uint32*)(p_cur += outpitch)) = r0;\n            }\n            p_cur += curr_offset; /* offset to the next pixel */\n            if (r13 & 0xFF000700) /* this column need clipping */\n            {\n                p_cur -= 4;\n                for (i = 0; i < 4; i++)\n                {\n                    p_ref = in + i;\n                    p_cur -= outpitch;  /* compensate for the first offset */\n\n                    tmp = (uint32)(p_ref + ref_offset); /* limit */\n                    while ((uint32)p_ref < tmp)\n                    {                           /* loop un-rolled */\n                        r0 = *(p_ref - (inpitch << 1));\n                        r1 = *(p_ref - inpitch);\n                        r2 = *p_ref;\n                        r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                        r4 = *(p_ref += inpitch);\n                        /* first pixel */\n                        r5 = *(p_ref += inpitch);\n                        result = (r0 + r5);\n                        r0 = (r1 + r4);\n                        result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                        r0 = (r2 + r3);\n                        result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* second pixel */\n                        r0 = *(p_ref += inpitch);\n            
            result = (r1 + r0);\n                        r1 = (r2 + r5);\n                        result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                        r1 = (r3 + r4);\n                        result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* third pixel */\n                        r1 = *(p_ref += inpitch);\n                        result = (r2 + r1);\n                        r2 = (r3 + r0);\n                        result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                        r2 = (r4 + r5);\n                        result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result >> 1);\n                        *(p_cur += outpitch) = result;\n                        /* fourth pixel */\n                        r2 = *(p_ref += inpitch);\n                        result = (r3 + r2);\n                        r3 = (r4 + r1);\n                        result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                        r3 = (r5 + r0);\n                        result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        /* 3/4 pel,  no need to clip */\n                        result = (result + p_ref[dy-(inpitch<<1)] + 1);\n                        result = (result 
>> 1);\n                        *(p_cur += outpitch) = result;\n                        p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                    }\n                    p_cur += (curr_offset - 3);\n                }\n            }\n        }\n    }\n    else\n    {\n        for (j = 0; j < blkwidth; j += 4, in += 4)\n        {\n            r13 = 0;\n            p_ref = in;\n            p_cur -= outpitch;  /* compensate for the first offset */\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n            {\n                r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n                p_ref += inpitch;\n                r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n                r0 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n\n                r0 += r1;\n                r6 += r7;\n\n                r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 += 20 * r1;\n                r6 += 20 * r7;\n                r0 += 0x100010;\n                r6 += 0x100010;\n\n                r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n                r8 = (r2 >> 8) & 0xFF00FF;\n                r2 &= 0xFF00FF;\n\n                r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n                r7 = (r1 >> 8) & 0xFF00FF;\n                r1 &= 0xFF00FF;\n                r1 += r2;\n\n                r7 += r8;\n\n                r0 -= 5 * r1;\n                r6 -= 5 * r7;\n\n        
        r0 >>= 5;\n                r6 >>= 5;\n                /* clip */\n                r13 |= r6;\n                r13 |= r0;\n                //CLIPPACK(r6,result)\n                r0 &= 0xFF00FF;\n                r6 &= 0xFF00FF;\n                r0 |= (r6 << 8);  /* pack it back */\n                *((uint32*)(p_cur += outpitch)) = r0;\n            }\n            p_cur += curr_offset; /* offset to the next pixel */\n            if (r13 & 0xFF000700) /* this column need clipping */\n            {\n                p_cur -= 4;\n                for (i = 0; i < 4; i++)\n                {\n                    p_ref = in + i;\n                    p_cur -= outpitch;  /* compensate for the first offset */\n                    tmp = (uint32)(p_ref + ref_offset); /* limit */\n                    while ((uint32)p_ref < tmp)\n                    {                           /* loop un-rolled */\n                        r0 = *(p_ref - (inpitch << 1));\n                        r1 = *(p_ref - inpitch);\n                        r2 = *p_ref;\n                        r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                        r4 = *(p_ref += inpitch);\n                        /* first pixel */\n                        r5 = *(p_ref += inpitch);\n                        result = (r0 + r5);\n                        r0 = (r1 + r4);\n                        result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                        r0 = (r2 + r3);\n                        result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* second pixel */\n                        r0 = *(p_ref += inpitch);\n                        result = (r1 + r0);\n                        r1 = (r2 + r5);\n                        result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n           
             r1 = (r3 + r4);\n                        result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* third pixel */\n                        r1 = *(p_ref += inpitch);\n                        result = (r2 + r1);\n                        r2 = (r3 + r0);\n                        result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                        r2 = (r4 + r5);\n                        result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        /* fourth pixel */\n                        r2 = *(p_ref += inpitch);\n                        result = (r3 + r2);\n                        r3 = (r4 + r1);\n                        result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                        r3 = (r5 + r0);\n                        result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                        result = (result + 16) >> 5;\n                        CLIP_RESULT(result)\n                        *(p_cur += outpitch) = result;\n                        p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                    }\n                    p_cur += (curr_offset - 3);\n                }\n            }\n        }\n    }\n\n    return ;\n}\n\nvoid eVertInterp2MC(uint8 *in, int inpitch, int *out, int outpitch,\n                    int blkwidth, int blkheight)\n{\n    int *p_cur;\n    uint8 *p_ref;\n    uint32 tmp;\n    int result, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    
ref_offset = blkheight * inpitch; /* for limit */\n\n    for (j = 0; j < blkwidth; j++)\n    {\n        p_cur -= outpitch; /* compensate for the first offset */\n        p_ref = in++;\n\n        tmp = (uint32)(p_ref + ref_offset); /* limit */\n        while ((uint32)p_ref < tmp)\n        {                           /* loop un-rolled */\n            r0 = *(p_ref - (inpitch << 1));\n            r1 = *(p_ref - inpitch);\n            r2 = *p_ref;\n            r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n            r4 = *(p_ref += inpitch);\n            /* first pixel */\n            r5 = *(p_ref += inpitch);\n            result = (r0 + r5);\n            r0 = (r1 + r4);\n            result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n            r0 = (r2 + r3);\n            result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n            *(p_cur += outpitch) = result;\n            /* second pixel */\n            r0 = *(p_ref += inpitch);\n            result = (r1 + r0);\n            r1 = (r2 + r5);\n            result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n            r1 = (r3 + r4);\n            result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n            *(p_cur += outpitch) = result;\n            /* third pixel */\n            r1 = *(p_ref += inpitch);\n            result = (r2 + r1);\n            r2 = (r3 + r0);\n            result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n            r2 = (r4 + r5);\n            result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n            *(p_cur += outpitch) = result;\n            /* fourth pixel */\n            r2 = *(p_ref += inpitch);\n            result = (r3 + r2);\n            r3 = (r4 + r1);\n            result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n            r3 = (r5 + r0);\n            result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n            *(p_cur += outpitch) = result;\n            p_ref -= (inpitch << 1);  
/* move back to center of the filter of the next one */\n        }\n        p_cur += curr_offset;\n    }\n\n    return ;\n}\n\nvoid eVertInterp3MC(int *in, int inpitch, uint8 *out, int outpitch,\n                    int blkwidth, int blkheight, int dy)\n{\n    uint8 *p_cur;\n    int *p_ref;\n    uint32 tmp;\n    int result, result2, curr_offset, ref_offset;\n    int j, r0, r1, r2, r3, r4, r5;\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically back up and one pixel to right */\n    ref_offset = blkheight * inpitch; /* for limit */\n\n    if (dy&1)\n    {\n        dy = (dy >> 1) ? -(inpitch << 1) : -(inpitch << 1) - inpitch;\n\n        for (j = 0; j < blkwidth; j++)\n        {\n            p_cur -= outpitch; /* compensate for the first offset */\n            p_ref = in++;\n\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)\n            {                           /* loop un-rolled */\n                r0 = *(p_ref - (inpitch << 1));\n                r1 = *(p_ref - inpitch);\n                r2 = *p_ref;\n                r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                r4 = *(p_ref += inpitch);\n                /* first pixel */\n                r5 = *(p_ref += inpitch);\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                /* second pixel */\n                r0 = *(p_ref += 
inpitch);\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                /* third pixel */\n                r1 = *(p_ref += inpitch);\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur += outpitch) = result;\n                /* fourth pixel */\n                r2 = *(p_ref += inpitch);\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                result2 = ((p_ref[dy] + 16) >> 5);\n                CLIP_RESULT(result2)\n                /* 3/4 pel,  no need to clip */\n                result = (result + result2 + 1);\n                result = (result >> 1);\n                *(p_cur 
+= outpitch) = result;\n                p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n            }\n            p_cur += curr_offset;\n        }\n    }\n    else\n    {\n        for (j = 0; j < blkwidth; j++)\n        {\n            p_cur -= outpitch; /* compensate for the first offset */\n            p_ref = in++;\n\n            tmp = (uint32)(p_ref + ref_offset); /* limit */\n            while ((uint32)p_ref < tmp)\n            {                           /* loop un-rolled */\n                r0 = *(p_ref - (inpitch << 1));\n                r1 = *(p_ref - inpitch);\n                r2 = *p_ref;\n                r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                r4 = *(p_ref += inpitch);\n                /* first pixel */\n                r5 = *(p_ref += inpitch);\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* second pixel */\n                r0 = *(p_ref += inpitch);\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* third pixel */\n                r1 = *(p_ref += inpitch);\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + r5);\n                result += (r2 * 20);//result += 
(r2<<4);    result += (r2<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                /* fourth pixel */\n                r2 = *(p_ref += inpitch);\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 512) >> 10;\n                CLIP_RESULT(result)\n                *(p_cur += outpitch) = result;\n                p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n            }\n            p_cur += curr_offset;\n        }\n    }\n\n    return ;\n}\n\nvoid eDiagonalInterpMC(uint8 *in1, uint8 *in2, int inpitch,\n                       uint8 *out, int outpitch,\n                       int blkwidth, int blkheight)\n{\n    int j, i;\n    int result;\n    uint8 *p_cur, *p_ref, *p_tmp8;\n    int curr_offset, ref_offset;\n    uint8 tmp_res[24][24], tmp_in[24][24];\n    uint32 *p_tmp;\n    uint32 tmp, pkres, tmp_result;\n    int32 r0, r1, r2, r3, r4, r5;\n    int32 r6, r7, r8, r9, r10, r13;\n    void *tmp_void;\n\n    ref_offset = inpitch - blkwidth;\n    p_ref = in1 - 2;\n    /* perform horizontal interpolation */\n    /* not word-aligned */\n    /* It is faster to read 1 byte at time to avoid calling CreateAlign */\n    /*  if(((uint32)p_ref)&0x3)\n        {\n            CreateAlign(p_ref,inpitch,0,&tmp_in[0][0],blkwidth+8,blkheight);\n            p_ref = &tmp_in[0][0];\n            ref_offset = 24-blkwidth;\n        }*/\n\n    tmp_void = (void*) & (tmp_res[0][0]);\n    p_tmp = (uint32*) tmp_void;\n\n    for (j = blkheight; j > 0; j--)\n    {\n        r13 = 0;\n        tmp = (uint32)(p_ref + blkwidth);\n\n        //r0 = *((uint32*)p_ref);   /* d,c,b,a */\n        //r1 = (r0>>8)&0xFF00FF;    /* 0,d,0,b */\n        
//r0 &= 0xFF00FF;           /* 0,c,0,a */\n        /* It is faster to read 1 byte at a time */\n        r0 = p_ref[0];\n        r1 = p_ref[2];\n        r0 |= (r1 << 16);           /* 0,c,0,a */\n        r1 = p_ref[1];\n        r2 = p_ref[3];\n        r1 |= (r2 << 16);           /* 0,d,0,b */\n\n        while ((uint32)p_ref < tmp)\n        {\n            //r2 = *((uint32*)(p_ref+=4));/* h,g,f,e */\n            //r3 = (r2>>8)&0xFF00FF;  /* 0,h,0,f */\n            //r2 &= 0xFF00FF;           /* 0,g,0,e */\n            /* It is faster to read 1 byte at a time */\n            r2 = *(p_ref += 4);\n            r3 = p_ref[2];\n            r2 |= (r3 << 16);           /* 0,g,0,e */\n            r3 = p_ref[1];\n            r4 = p_ref[3];\n            r3 |= (r4 << 16);           /* 0,h,0,f */\n\n            r4 = r0 + r3;       /* c+h, a+f */\n            r5 = r0 + r1;   /* c+d, a+b */\n            r6 = r2 + r3;   /* g+h, e+f */\n            r5 >>= 16;\n            r5 |= (r6 << 16);   /* e+f, c+d */\n            r4 += r5 * 20;      /* c+20*e+20*f+h, a+20*c+20*d+f */\n            r4 += 0x100010; /* +16, +16 */\n            r5 = r1 + r2;       /* d+g, b+e */\n            r4 -= r5 * 5;       /* c-5*d+20*e+20*f-5*g+h, a-5*b+20*c+20*d-5*e+f */\n            r4 >>= 5;\n            r13 |= r4;      /* check clipping */\n            r4 &= 0xFF00FF; /* mask */\n\n            r5 = p_ref[4];  /* i */\n            r6 = (r5 << 16);\n            r5 = r6 | (r2 >> 16);/* 0,i,0,g */\n            r5 += r1;       /* d+i, b+g */ /* r5 not free */\n            r1 >>= 16;\n            r1 |= (r3 << 16); /* 0,f,0,d */ /* r1 has changed */\n            r1 += r2;       /* f+g, d+e */\n            r5 += 20 * r1;  /* d+20f+20g+i, b+20d+20e+g */\n            r0 >>= 16;\n            r0 |= (r2 << 16); /* 0,e,0,c */ /* r0 has changed */\n            r0 += r3;       /* e+h, c+f */\n            r5 += 0x100010; /* 16,16 */\n            r5 -= r0 * 5;       /* d-5e+20f+20g-5h+i, b-5c+20d+20e-5f+g */\n            r5 
>>= 5;\n            r13 |= r5;      /* check clipping */\n            r5 &= 0xFF00FF; /* mask */\n\n            r4 |= (r5 << 8);    /* pack them together */\n            *p_tmp++ = r4;\n            r1 = r3;\n            r0 = r2;\n        }\n        p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */\n        p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n\n        if (r13&0xFF000700) /* need clipping */\n        {\n            /* move back to the beginning of the line */\n            p_ref -= (ref_offset + blkwidth);   /* input */\n            p_tmp -= 6; /* intermediate output */\n            tmp = (uint32)(p_ref + blkwidth);\n            while ((uint32)p_ref < tmp)\n            {\n                r0 = *p_ref++;\n                r1 = *p_ref++;\n                r2 = *p_ref++;\n                r3 = *p_ref++;\n                r4 = *p_ref++;\n                /* first pixel */\n                r5 = *p_ref++;\n                result = (r0 + r5);\n                r0 = (r1 + r4);\n                result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                r0 = (r2 + r3);\n                result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres = result;\n                /* second pixel */\n                r0 = *p_ref++;\n                result = (r1 + r0);\n                r1 = (r2 + r5);\n                result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                r1 = (r3 + r4);\n                result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres |= (result << 8);\n                /* third pixel */\n                r1 = *p_ref++;\n                result = (r2 + r1);\n                r2 = (r3 + r0);\n                result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                r2 = (r4 + 
r5);\n                result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres |= (result << 16);\n                /* fourth pixel */\n                r2 = *p_ref++;\n                result = (r3 + r2);\n                r3 = (r4 + r1);\n                result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                r3 = (r5 + r0);\n                result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                result = (result + 16) >> 5;\n                CLIP_RESULT(result)\n                pkres |= (result << 24);\n\n                *p_tmp++ = pkres; /* write 4 pixel */\n                p_ref -= 5;\n            }\n            p_tmp += ((24 - blkwidth) >> 2); /* move to the next line */\n            p_ref += ref_offset;  /*    ref_offset = inpitch-blkwidth; */\n        }\n    }\n\n    /*  perform vertical interpolation */\n    /* not word-aligned */\n    if (((uint32)in2)&0x3)\n    {\n        eCreateAlign(in2, inpitch, -2, &tmp_in[0][0], blkwidth, blkheight + 5);\n        in2 = &tmp_in[2][0];\n        inpitch = 24;\n    }\n\n    p_cur = out;\n    curr_offset = 1 - outpitch * (blkheight - 1); /* offset vertically up and one pixel right */\n    pkres = blkheight * inpitch; /* reuse it for limit */\n\n    curr_offset += 3;\n\n    for (j = 0; j < blkwidth; j += 4, in2 += 4)\n    {\n        r13 = 0;\n        p_ref = in2;\n        p_tmp8 = &(tmp_res[0][j]); /* intermediate result */\n        p_tmp8 -= 24;  /* compensate for the first offset */\n        p_cur -= outpitch;  /* compensate for the first offset */\n        tmp = (uint32)(p_ref + pkres); /* limit */\n        while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n        {\n            /* Read 1 byte at a time is too slow, too many read and pack ops, need to call CreateAlign */\n            /*p_ref8 = p_ref-(inpitch<<1);          r0 = p_ref8[0];         r1 = p_ref8[2];\n      
      r0 |= (r1<<16);         r6 = p_ref8[1];         r1 = p_ref8[3];\n            r6 |= (r1<<16);         p_ref+=inpitch; */\n            r0 = *((uint32*)(p_ref - (inpitch << 1))); /* load 4 bytes */\n            p_ref += inpitch;\n            r6 = (r0 >> 8) & 0xFF00FF; /* second and fourth byte */\n            r0 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref+(inpitch<<1);\n            r1 = p_ref8[0];         r7 = p_ref8[2];         r1 |= (r7<<16);\n            r7 = p_ref8[1];         r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref + (inpitch << 1)));  /* r1, r7, ref[3] */\n            r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n\n            r0 += r1;\n            r6 += r7;\n\n            /*r2 = p_ref[0];            r8 = p_ref[2];          r2 |= (r8<<16);\n            r8 = p_ref[1];          r1 = p_ref[3];          r8 |= (r1<<16);*/\n            r2 = *((uint32*)p_ref); /* r2, r8, ref[1] */\n            r8 = (r2 >> 8) & 0xFF00FF;\n            r2 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref-inpitch;           r1 = p_ref8[0];         r7 = p_ref8[2];\n            r1 |= (r7<<16);         r1 += r2;           r7 = p_ref8[1];\n            r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref - inpitch)); /* r1, r7, ref[0] */\n            r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n            r1 += r2;\n\n            r7 += r8;\n\n            r0 += 20 * r1;\n            r6 += 20 * r7;\n            r0 += 0x100010;\n            r6 += 0x100010;\n\n            /*p_ref8 = p_ref-(inpitch<<1);          r2 = p_ref8[0];         r8 = p_ref8[2];\n            r2 |= (r8<<16);         r8 = p_ref8[1];         r1 = p_ref8[3];         r8 |= (r1<<16);*/\n            r2 = *((uint32*)(p_ref - (inpitch << 1))); /* r2, r8, ref[-1] */\n            r8 = (r2 >> 8) & 0xFF00FF;\n            r2 &= 0xFF00FF;\n\n            /*p_ref8 = p_ref+inpitch;           r1 = p_ref8[0];         r7 = p_ref8[2];\n            r1 |= 
(r7<<16);         r1 += r2;           r7 = p_ref8[1];\n            r2 = p_ref8[3];         r7 |= (r2<<16);*/\n            r1 = *((uint32*)(p_ref + inpitch)); /* r1, r7, ref[2] */\n            r7 = (r1 >> 8) & 0xFF00FF;\n            r1 &= 0xFF00FF;\n            r1 += r2;\n\n            r7 += r8;\n\n            r0 -= 5 * r1;\n            r6 -= 5 * r7;\n\n            r0 >>= 5;\n            r6 >>= 5;\n            /* clip */\n            r13 |= r6;\n            r13 |= r0;\n            //CLIPPACK(r6,result)\n            /* add with horizontal results */\n            r10 = *((uint32*)(p_tmp8 += 24));\n            r9 = (r10 >> 8) & 0xFF00FF;\n            r10 &= 0xFF00FF;\n\n            r0 += r10;\n            r0 += 0x10001;\n            r0 = (r0 >> 1) & 0xFF00FF;   /* mask to 8 bytes */\n\n            r6 += r9;\n            r6 += 0x10001;\n            r6 = (r6 >> 1) & 0xFF00FF;   /* mask to 8 bytes */\n\n            r0 |= (r6 << 8);  /* pack it back */\n            *((uint32*)(p_cur += outpitch)) = r0;\n        }\n        p_cur += curr_offset; /* offset to the next pixel */\n        if (r13 & 0xFF000700) /* this column need clipping */\n        {\n            p_cur -= 4;\n            for (i = 0; i < 4; i++)\n            {\n                p_ref = in2 + i;\n                p_tmp8 = &(tmp_res[0][j+i]); /* intermediate result */\n                p_tmp8 -= 24;  /* compensate for the first offset */\n                p_cur -= outpitch;  /* compensate for the first offset */\n                tmp = (uint32)(p_ref + pkres); /* limit */\n                while ((uint32)p_ref < tmp)  /* the loop un-rolled  */\n                {\n                    r0 = *(p_ref - (inpitch << 1));\n                    r1 = *(p_ref - inpitch);\n                    r2 = *p_ref;\n                    r3 = *(p_ref += inpitch);  /* modify pointer before loading */\n                    r4 = *(p_ref += inpitch);\n                    /* first pixel */\n                    r5 = *(p_ref += inpitch);\n             
       result = (r0 + r5);\n                    r0 = (r1 + r4);\n                    result -= (r0 * 5);//result -= r0;  result -= (r0<<2);\n                    r0 = (r2 + r3);\n                    result += (r0 * 20);//result += (r0<<4);    result += (r0<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* modify pointer before loading */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    /* second pixel */\n                    r0 = *(p_ref += inpitch);\n                    result = (r1 + r0);\n                    r1 = (r2 + r5);\n                    result -= (r1 * 5);//result -= r1;  result -= (r1<<2);\n                    r1 = (r3 + r4);\n                    result += (r1 * 20);//result += (r1<<4);    result += (r1<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    /* third pixel */\n                    r1 = *(p_ref += inpitch);\n                    result = (r2 + r1);\n                    r2 = (r3 + r0);\n                    result -= (r2 * 5);//result -= r2;  result -= (r2<<2);\n                    r2 = (r4 + r5);\n                    result += (r2 * 20);//result += (r2<<4);    result += (r2<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = 
result;\n                    /* fourth pixel */\n                    r2 = *(p_ref += inpitch);\n                    result = (r3 + r2);\n                    r3 = (r4 + r1);\n                    result -= (r3 * 5);//result -= r3;  result -= (r3<<2);\n                    r3 = (r5 + r0);\n                    result += (r3 * 20);//result += (r3<<4);    result += (r3<<2);\n                    result = (result + 16) >> 5;\n                    CLIP_RESULT(result)\n                    tmp_result = *(p_tmp8 += 24);  /* intermediate result */\n                    result = (result + tmp_result + 1);  /* no clip */\n                    result = (result >> 1);\n                    *(p_cur += outpitch) = result;\n                    p_ref -= (inpitch << 1);  /* move back to center of the filter of the next one */\n                }\n                p_cur += (curr_offset - 3);\n            }\n        }\n    }\n\n    return ;\n}\n\n/* position G */\nvoid eFullPelMC(uint8 *in, int inpitch, uint8 *out, int outpitch,\n                int blkwidth, int blkheight)\n{\n    int i, j;\n    int offset_in = inpitch - blkwidth;\n    int offset_out = outpitch - blkwidth;\n    uint32 temp;\n    uint8 byte;\n\n    if (((uint32)in)&3)\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 4)\n            {\n                temp = *in++;\n                byte = *in++;\n                temp |= (byte << 8);\n                byte = *in++;\n                temp |= (byte << 16);\n                byte = *in++;\n                temp |= (byte << 24);\n\n                *((uint32*)out) = temp; /* write 4 bytes */\n                out += 4;\n            }\n            out += offset_out;\n            in += offset_in;\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 4)\n            {\n                temp = *((uint32*)in);\n                *((uint32*)out) = temp;\n                
in += 4;\n                out += 4;\n            }\n            out += offset_out;\n            in += offset_in;\n        }\n    }\n    return ;\n}\n\nvoid ePadChroma(uint8 *ref, int picwidth, int picheight, int picpitch, int x_pos, int y_pos)\n{\n    int pad_height;\n    int pad_width;\n    uint8 *start;\n    uint32 word1, word2, word3;\n    int offset, j;\n\n\n    pad_height = 8 + ((y_pos & 7) ? 1 : 0);\n    pad_width = 8 + ((x_pos & 7) ? 1 : 0);\n\n    y_pos >>= 3;\n    x_pos >>= 3;\n    // pad vertical first\n    if (y_pos < 0) // need to pad up\n    {\n        if (x_pos < -8) start = ref - 8;\n        else if (x_pos + pad_width > picwidth + 7) start = ref + picwidth + 7 - pad_width;\n        else start = ref + x_pos;\n\n        /* word-align start */\n        offset = (uint32)start & 0x3;\n        if (offset) start -= offset;\n\n        word1 = *((uint32*)start);\n        word2 = *((uint32*)(start + 4));\n        word3 = *((uint32*)(start + 8));\n\n        /* pad up N rows */\n        j = -y_pos;\n        if (j > 8) j = 8;\n        while (j--)\n        {\n            *((uint32*)(start -= picpitch)) = word1;\n            *((uint32*)(start + 4)) = word2;\n            *((uint32*)(start + 8)) = word3;\n        }\n\n    }\n    else if (y_pos + pad_height >= picheight) /* pad down */\n    {\n        if (x_pos < -8) start = ref + picpitch * (picheight - 1) - 8;\n        else if (x_pos + pad_width > picwidth + 7) start = ref + picpitch * (picheight - 1) +\n                    picwidth + 7 - pad_width;\n        else    start = ref + picpitch * (picheight - 1) + x_pos;\n\n        /* word-align start */\n        offset = (uint32)start & 0x3;\n        if (offset) start -= offset;\n\n        word1 = *((uint32*)start);\n        word2 = *((uint32*)(start + 4));\n        word3 = *((uint32*)(start + 8));\n\n        /* pad down N rows */\n        j = y_pos + pad_height - picheight;\n        if (j > 8) j = 8;\n        while (j--)\n        {\n            *((uint32*)(start += 
picpitch)) = word1;\n            *((uint32*)(start + 4)) = word2;\n            *((uint32*)(start + 8)) = word3;\n        }\n    }\n\n    /* now pad horizontal */\n    if (x_pos < 0) // pad left\n    {\n        if (y_pos < -8) start = ref - (picpitch << 3);\n        else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch;\n        else start = ref + y_pos * picpitch;\n\n        // now pad left 8 pixels for pad_height rows */\n        j = pad_height;\n        start -= picpitch;\n        while (j--)\n        {\n            word1 = *(start += picpitch);\n            word1 |= (word1 << 8);\n            word1 |= (word1 << 16);\n            *((uint32*)(start - 8)) = word1;\n            *((uint32*)(start - 4)) = word1;\n        }\n    }\n    else if (x_pos + pad_width >= picwidth) /* pad right */\n    {\n        if (y_pos < -8) start = ref - (picpitch << 3) + picwidth - 1;\n        else if (y_pos + pad_height > picheight + 7) start = ref + (picheight + 7 - pad_height) * picpitch + picwidth - 1;\n        else start = ref + y_pos * picpitch + picwidth - 1;\n\n        // now pad right 8 pixels for pad_height rows */\n        j = pad_height;\n        start -= picpitch;\n        while (j--)\n        {\n            word1 = *(start += picpitch);\n            word1 |= (word1 << 8);\n            word1 |= (word1 << 16);\n            *((uint32*)(start + 1)) = word1;\n            *((uint32*)(start + 5)) = word1;\n        }\n    }\n\n    return ;\n}\n\n\nvoid eChromaMotionComp(uint8 *ref, int picwidth, int picheight,\n                       int x_pos, int y_pos,\n                       uint8 *pred, int picpitch,\n                       int blkwidth, int blkheight)\n{\n    int dx, dy;\n    int offset_dx, offset_dy;\n    int index;\n\n    ePadChroma(ref, picwidth, picheight, picpitch, x_pos, y_pos);\n\n    dx = x_pos & 7;\n    dy = y_pos & 7;\n    offset_dx = (dx + 7) >> 3;\n    offset_dy = (dy + 7) >> 3;\n    x_pos = x_pos >> 3;  /* round it to 
full-pel resolution */\n    y_pos = y_pos >> 3;\n\n    ref += y_pos * picpitch + x_pos;\n\n    index = offset_dx + (offset_dy << 1) + ((blkwidth << 1) & 0x7);\n\n    (*(eChromaMC_SIMD[index]))(ref, picpitch , dx, dy, pred, picpitch, blkwidth, blkheight);\n    return ;\n}\n\n\n/* SIMD routines, unroll the loops in vertical direction, decreasing loops (things to be done) */\nvoid eChromaDiagonalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    int32 r0, r1, r2, r3, result0, result1;\n    uint8 temp[288];\n    uint8 *ref, *out;\n    int i, j;\n    int dx_8 = 8 - dx;\n    int dy_8 = 8 - dy;\n\n    /* horizontal first */\n    out = temp;\n    for (i = 0; i < blkheight + 1; i++)\n    {\n        ref = pRef;\n        r0 = ref[0];\n        for (j = 0; j < blkwidth; j += 4)\n        {\n            r0 |= (ref[2] << 16);\n            result0 = dx_8 * r0;\n\n            r1 = ref[1] | (ref[3] << 16);\n            result0 += dx * r1;\n            *(int32 *)out = result0;\n\n            result0 = dx_8 * r1;\n\n            r2 = ref[4];\n            r0 = r0 >> 16;\n            r1 = r0 | (r2 << 16);\n            result0 += dx * r1;\n            *(int32 *)(out + 16) = result0;\n\n            ref += 4;\n            out += 4;\n            r0 = r2;\n        }\n        pRef += srcPitch;\n        out += (32 - blkwidth);\n    }\n\n//  pRef -= srcPitch*(blkheight+1);\n    ref = temp;\n\n    for (j = 0; j < blkwidth; j += 4)\n    {\n        r0 = *(int32 *)ref;\n        r1 = *(int32 *)(ref + 16);\n        ref += 32;\n        out = pOut;\n        for (i = 0; i < (blkheight >> 1); i++)\n        {\n            result0 = dy_8 * r0 + 0x00200020;\n            r2 = *(int32 *)ref;\n            result0 += dy * r2;\n            result0 >>= 6;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00200020;\n            r3 = *(int32 *)(ref + 16);\n            result1 += 
dy * r3;\n            result1 >>= 6;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            out += predPitch;\n            ref += 32;\n\n            result0 = dy_8 * r0 + 0x00200020;\n            r2 = *(int32 *)ref;\n            result0 += dy * r2;\n            result0 >>= 6;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00200020;\n            r3 = *(int32 *)(ref + 16);\n            result1 += dy * r3;\n            result1 >>= 6;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            out += predPitch;\n            ref += 32;\n        }\n        pOut += 4;\n        ref = temp + 4; /* since it can only iterate twice max */\n    }\n    return;\n}\n\nvoid eChromaHorizontalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                              uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    (void)(dy);\n\n    int32 r0, r1, r2, result0, result1;\n    uint8 *ref, *out;\n    int i, j;\n    int dx_8 = 8 - dx;\n\n    /* horizontal first */\n    for (i = 0; i < blkheight; i++)\n    {\n        ref = pRef;\n        out = pOut;\n\n        r0 = ref[0];\n        for (j = 0; j < blkwidth; j += 4)\n        {\n            r0 |= (ref[2] << 16);\n            result0 = dx_8 * r0 + 0x00040004;\n\n            r1 = ref[1] | (ref[3] << 16);\n            result0 += dx * r1;\n            result0 >>= 3;\n            result0 &= 0x00FF00FF;\n\n            result1 = dx_8 * r1 + 0x00040004;\n\n            r2 = ref[4];\n            r0 = r0 >> 16;\n            r1 = r0 | (r2 << 16);\n            result1 += dx * r1;\n            result1 >>= 3;\n            result1 &= 0x00FF00FF;\n\n            *(int32 *)out = result0 | (result1 << 8);\n\n            ref += 4;\n            out += 4;\n            r0 = r2;\n        }\n\n        pRef += srcPitch;\n        pOut += predPitch;\n    }\n    
return;\n}\n\nvoid eChromaVerticalMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                            uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    (void)(dx);\n\n    int32 r0, r1, r2, r3, result0, result1;\n    int i, j;\n    uint8 *ref, *out;\n    int dy_8 = 8 - dy;\n    /* vertical first */\n    for (i = 0; i < blkwidth; i += 4)\n    {\n        ref = pRef;\n        out = pOut;\n\n        r0 = ref[0] | (ref[2] << 16);\n        r1 = ref[1] | (ref[3] << 16);\n        ref += srcPitch;\n        for (j = 0; j < blkheight; j++)\n        {\n            result0 = dy_8 * r0 + 0x00040004;\n            r2 = ref[0] | (ref[2] << 16);\n            result0 += dy * r2;\n            result0 >>= 3;\n            result0 &= 0x00FF00FF;\n            r0 = r2;\n\n            result1 = dy_8 * r1 + 0x00040004;\n            r3 = ref[1] | (ref[3] << 16);\n            result1 += dy * r3;\n            result1 >>= 3;\n            result1 &= 0x00FF00FF;\n            r1 = r3;\n            *(int32 *)out = result0 | (result1 << 8);\n            ref += srcPitch;\n            out += predPitch;\n        }\n        pOut += 4;\n        pRef += 4;\n    }\n    return;\n}\n\nvoid eChromaDiagonalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                             uint8 *pOut,  int predPitch, int blkwidth, int blkheight)\n{\n    (void)(blkwidth);\n\n    int32 r0, r1, temp0, temp1, result;\n    int32 temp[9];\n    int32 *out;\n    int i, r_temp;\n    int dy_8 = 8 - dy;\n\n    /* horizontal first */\n    out = temp;\n    for (i = 0; i < blkheight + 1; i++)\n    {\n        r_temp = pRef[1];\n        temp0 = (pRef[0] << 3) + dx * (r_temp - pRef[0]);\n        temp1 = (r_temp << 3) + dx * (pRef[2] - r_temp);\n        r0 = temp0 | (temp1 << 16);\n        *out++ = r0;\n        pRef += srcPitch;\n    }\n\n    pRef -= srcPitch * (blkheight + 1);\n\n    out = temp;\n\n    r0 = *out++;\n\n    for (i = 0; i < blkheight; i++)\n    {\n        result = dy_8 * r0 + 0x00200020;\n  
      r1 = *out++;\n        result += dy * r1;\n        result >>= 6;\n        result &= 0x00FF00FF;\n        *(int16 *)pOut = (result >> 8) | (result & 0xFF);\n        r0 = r1;\n        pOut += predPitch;\n    }\n    return;\n}\n\nvoid eChromaHorizontalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                               uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    (void)(dy);\n    (void)(blkwidth);\n\n    int i, temp, temp0, temp1;\n\n    /* horizontal first */\n    for (i = 0; i < blkheight; i++)\n    {\n        temp = pRef[1];\n        temp0 = ((pRef[0] << 3) + dx * (temp - pRef[0]) + 4) >> 3;\n        temp1 = ((temp << 3) + dx * (pRef[2] - temp) + 4) >> 3;\n\n        *(int16 *)pOut = temp0 | (temp1 << 8);\n        pRef += srcPitch;\n        pOut += predPitch;\n\n    }\n    return;\n}\nvoid eChromaVerticalMC2_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                             uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    (void)(dx);\n    (void)(blkwidth);\n\n    int32 r0, r1, result;\n    int i;\n    int dy_8 = 8 - dy;\n    r0 = pRef[0] | (pRef[1] << 16);\n    pRef += srcPitch;\n    for (i = 0; i < blkheight; i++)\n    {\n        result = dy_8 * r0 + 0x00040004;\n        r1 = pRef[0] | (pRef[1] << 16);\n        result += dy * r1;\n        result >>= 3;\n        result &= 0x00FF00FF;\n        *(int16 *)pOut = (result >> 8) | (result & 0xFF);\n        r0 = r1;\n        pRef += srcPitch;\n        pOut += predPitch;\n    }\n    return;\n}\n\nvoid eChromaFullMC_SIMD(uint8 *pRef, int srcPitch, int dx, int dy,\n                        uint8 *pOut, int predPitch, int blkwidth, int blkheight)\n{\n    (void)(dx);\n    (void)(dy);\n\n    int i, j;\n    int offset_in = srcPitch - blkwidth;\n    int offset_out = predPitch - blkwidth;\n    uint16 temp;\n    uint8 byte;\n\n    if (((uint32)pRef)&1)\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 2)\n           
 {\n                temp = *pRef++;\n                byte = *pRef++;\n                temp |= (byte << 8);\n                *((uint16*)pOut) = temp; /* write 2 bytes */\n                pOut += 2;\n            }\n            pOut += offset_out;\n            pRef += offset_in;\n        }\n    }\n    else\n    {\n        for (j = blkheight; j > 0; j--)\n        {\n            for (i = blkwidth; i > 0; i -= 2)\n            {\n                temp = *((uint16*)pRef);\n                *((uint16*)pOut) = temp;\n                pRef += 2;\n                pOut += 2;\n            }\n            pOut += offset_out;\n            pRef += offset_in;\n        }\n    }\n    return ;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/motion_est.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_mem.h\"\n#include \"avcenc_lib.h\"\n\n#define MIN_GOP     1   /* minimum size of GOP, 1/23/01, need to be tested */\n\n#define DEFAULT_REF_IDX     0  /* always from the first frame in the reflist */\n\n#define ALL_CAND_EQUAL  10  /*  any number greater than 5 will work */\n\n\n/* from TMN 3.2 */\n#define PREF_NULL_VEC 129   /* zero vector bias */\n#define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/\n#define PREF_INTRA  3024//512       /* bias for INTRA coding */\n\nconst static int tab_exclude[9][9] =  // [last_loc][curr_loc]\n{\n    {0, 0, 0, 0, 0, 0, 0, 0, 0},\n    {0, 0, 0, 0, 1, 1, 1, 0, 0},\n    {0, 0, 0, 0, 1, 1, 1, 1, 1},\n    {0, 0, 0, 0, 0, 0, 1, 1, 1},\n    {0, 1, 1, 0, 0, 0, 1, 1, 1},\n    {0, 1, 1, 0, 0, 0, 0, 0, 1},\n    {0, 1, 1, 1, 1, 0, 0, 0, 1},\n    {0, 0, 1, 1, 1, 0, 0, 0, 0},\n    {0, 0, 1, 1, 1, 1, 1, 0, 0}\n}; //to decide whether to continue or compute\n\nconst static int refine_next[8][2] =    /* [curr_k][increment] */\n{\n    {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}\n};\n\n#ifdef _SAD_STAT\nuint32 num_MB = 0;\nuint32 num_cand = 0;\n#endif\n\n/************************************************************************/\n#define 
TH_INTER_2  100  /* temporary for now */\n\n//#define FIXED_INTERPRED_MODE  AVC_P16\n#define FIXED_REF_IDX   0\n#define FIXED_MVX 0\n#define FIXED_MVY 0\n\n// only use when AVC_P8 or AVC_P8ref0\n#define FIXED_SUBMB_MODE    AVC_4x4\n/*************************************************************************/\n\n/* Initialize arrays necessary for motion search */\nAVCEnc_Status InitMotionSearchModule(AVCHandle *avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    int search_range = rateCtrl->mvRange;\n    int number_of_subpel_positions = 4 * (2 * search_range + 3);\n    int max_mv_bits, max_mvd;\n    int temp_bits = 0;\n    uint8 *mvbits;\n    int bits, imax, imin, i;\n    uint8* subpel_pred = (uint8*) encvid->subpel_pred; // all 16 sub-pel positions\n\n\n    while (number_of_subpel_positions > 0)\n    {\n        temp_bits++;\n        number_of_subpel_positions >>= 1;\n    }\n\n    max_mv_bits = 3 + 2 * temp_bits;\n    max_mvd  = (1 << (max_mv_bits >> 1)) - 1;\n\n    encvid->mvbits_array = (uint8*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,\n                           sizeof(uint8) * (2 * max_mvd + 1), DEFAULT_ATTR);\n\n    if (encvid->mvbits_array == NULL)\n    {\n        return AVCENC_MEMORY_FAIL;\n    }\n\n    mvbits = encvid->mvbits  = encvid->mvbits_array + max_mvd;\n\n    mvbits[0] = 1;\n    for (bits = 3; bits <= max_mv_bits; bits += 2)\n    {\n        imax = 1    << (bits >> 1);\n        imin = imax >> 1;\n\n        for (i = imin; i < imax; i++)   mvbits[-i] = mvbits[i] = bits;\n    }\n\n    /* initialize half-pel search */\n    encvid->hpel_cand[0] = subpel_pred + REF_CENTER;\n    encvid->hpel_cand[1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1 ;\n    encvid->hpel_cand[2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->hpel_cand[3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;\n    encvid->hpel_cand[4] = subpel_pred + V2Q_H2Q * 
SUBPEL_PRED_BLK_SIZE + 25;\n    encvid->hpel_cand[5] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;\n    encvid->hpel_cand[6] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->hpel_cand[7] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->hpel_cand[8] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n\n    /* For quarter-pel interpolation around best half-pel result */\n\n    encvid->bilin_base[0][0] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[0][1] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->bilin_base[0][2] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->bilin_base[0][3] = subpel_pred + REF_CENTER;\n\n\n    encvid->bilin_base[1][0] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[1][1] = subpel_pred + REF_CENTER - 24;\n    encvid->bilin_base[1][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[1][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;\n\n    encvid->bilin_base[2][0] = subpel_pred + REF_CENTER - 24;\n    encvid->bilin_base[2][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->bilin_base[2][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->bilin_base[2][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;\n\n    encvid->bilin_base[3][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->bilin_base[3][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 1;\n    encvid->bilin_base[3][2] = subpel_pred + REF_CENTER;\n    encvid->bilin_base[3][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;\n\n    encvid->bilin_base[4][0] = subpel_pred + REF_CENTER;\n    encvid->bilin_base[4][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;\n    encvid->bilin_base[4][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;\n    encvid->bilin_base[4][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 25;\n\n    encvid->bilin_base[5][0] = subpel_pred + V0Q_H2Q 
* SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->bilin_base[5][1] = subpel_pred + REF_CENTER;\n    encvid->bilin_base[5][2] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->bilin_base[5][3] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 25;\n\n    encvid->bilin_base[6][0] = subpel_pred + REF_CENTER - 1;\n    encvid->bilin_base[6][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->bilin_base[6][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE + 24;\n    encvid->bilin_base[6][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n\n    encvid->bilin_base[7][0] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[7][1] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[7][2] = subpel_pred + REF_CENTER - 1;\n    encvid->bilin_base[7][3] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE + 24;\n\n    encvid->bilin_base[8][0] = subpel_pred + REF_CENTER - 25;\n    encvid->bilin_base[8][1] = subpel_pred + V0Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[8][2] = subpel_pred + V2Q_H0Q * SUBPEL_PRED_BLK_SIZE;\n    encvid->bilin_base[8][3] = subpel_pred + V2Q_H2Q * SUBPEL_PRED_BLK_SIZE;\n\n\n    return AVCENC_SUCCESS;\n}\n\n/* Clean-up memory */\nvoid CleanMotionSearchModule(AVCHandle *avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n\n    if (encvid->mvbits_array)\n    {\n        avcHandle->CBAVC_Free(avcHandle->userData, (int)(encvid->mvbits_array));\n        encvid->mvbits = NULL;\n    }\n\n    return ;\n}\n\n\nbool IntraDecisionABE(int *min_cost, uint8 *cur, int pitch, bool ave)\n{\n    int j;\n    uint8 *out;\n    int temp, SBE;\n    OsclFloat ABE;\n    bool intra = true;\n\n    SBE = 0;\n    /* top neighbor */\n    out = cur - pitch;\n    for (j = 0; j < 16; j++)\n    {\n        temp = out[j] - cur[j];\n        SBE += ((temp >= 0) ? 
temp : -temp);\n    }\n\n    /* left neighbor */\n    out = cur - 1;\n    out -= pitch;\n    cur -= pitch;\n    for (j = 0; j < 16; j++)\n    {\n        temp = *(out += pitch) - *(cur += pitch);\n        SBE += ((temp >= 0) ? temp : -temp);\n    }\n\n    /* compare mincost/384 and SBE/64 */\n    ABE = SBE / 32.0; //ABE = SBE/64.0; //\n    if (ABE >= *min_cost / 256.0) //if( ABE*0.8 >= min_cost/384.0) //\n    {\n        intra = false; // no possibility of intra, just use inter\n    }\n    else\n    {\n        if (ave == true)\n        {\n            *min_cost = (*min_cost + (int)(SBE * 8)) >> 1; // possibility of intra, averaging the cost\n        }\n        else\n        {\n            *min_cost = (int)(SBE * 8);\n        }\n    }\n\n    return intra;\n}\n\n/******* main function for macroblock prediction for the entire frame ***/\n/* if turns out to be IDR frame, set video->nal_unit_type to AVC_NALTYPE_IDR */\nvoid AVCMotionEstimation(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    int slice_type = video->slice_type;\n    AVCFrameIO *currInput = encvid->currInput;\n    AVCPictureData *refPic = video->RefPicList0[0];\n    int i, j, k;\n    int mbwidth = video->PicWidthInMbs;\n    int mbheight = video->PicHeightInMbs;\n    int totalMB = video->PicSizeInMbs;\n    int pitch = currInput->pitch;\n    AVCMacroblock *currMB, *mblock = video->mblock;\n    AVCMV *mot_mb_16x16, *mot16x16 = encvid->mot16x16;\n    // AVCMV *mot_mb_16x8, *mot_mb_8x16, *mot_mb_8x8, etc;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    uint8 *intraSearch = encvid->intraSearch;\n    uint FS_en = encvid->fullsearch_enable;\n\n    int NumIntraSearch, start_i, numLoop, incr_i;\n    int mbnum, offset;\n    uint8 *cur, *best_cand[5];\n    int totalSAD = 0;   /* average SAD for rate control */\n    int type_pred;\n    int abe_cost;\n\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */\n    int collect = 0;\n    HTFM_Stat htfm_stat;\n    double 
newvar[16];\n    double exp_lamda[15];\n    /*********************************/\n#endif\n    int hp_guess = 0;\n    uint32 mv_uint32;\n\n    offset = 0;\n\n    if (slice_type == AVC_I_SLICE) // need to calculate rateCtrl->totalSAD for RC to take action!!\n    {\n        /* cannot do I16 prediction here because it needs full decoding. */\n        {   /* no RC for I-slice */\n\n            i = totalMB - 1;\n            while (i >= 0)\n            {\n                encvid->min_cost[i--] = 0x7FFFFFFF;  /* max value for int */\n            }\n        }\n\n        /* reset intra MB pattern */\n        oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB);\n\n        encvid->firstIntraRefreshMBIndx = 0; /* reset this */\n\n        return ;\n    }\n    else   // P_SLICE\n    {\n        for (i = 0; i < totalMB; i++)\n        {\n            mblock[i].mb_intra = 0;\n        }\n        oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB);\n    }\n\n    if (refPic->padded == 0)\n    {\n        AVCPaddingEdge(refPic);\n        refPic->padded = 1;\n    }\n    /* Random INTRA update */\n    if (rateCtrl->intraMBRate)\n    {\n        AVCRasterIntraUpdate(encvid, mblock, totalMB, rateCtrl->intraMBRate);\n    }\n\n    encvid->sad_extra_info = NULL;\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/\n    InitHTFM(video, &htfm_stat, newvar, &collect);\n    /*********************************/\n#endif\n\n    if ((rateCtrl->scdEnable == 1)\n            && ((rateCtrl->frame_rate < 5.0) || (video->sliceHdr->frame_num > MIN_GOP)))\n        /* do not try to detect a new scene if low frame rate and too close to previous I-frame */\n    {\n        incr_i = 2;\n        numLoop = 2;\n        start_i = 1;\n        type_pred = 0; /* for initial candidate selection */\n    }\n    else\n    {\n        incr_i = 1;\n        numLoop = 1;\n        start_i = 0;\n        type_pred = 2;\n    }\n\n    /* First pass, loop thru half the macroblock */\n    /* determine scene change */\n    /* Second pass, for 
the rest of macroblocks */\n    NumIntraSearch = 0; // to be intra searched in the encoding loop.\n    while (numLoop--)\n    {\n        for (j = 0; j < mbheight; j++)\n        {\n            if (incr_i > 1)\n                start_i = (start_i == 0 ? 1 : 0) ; /* toggle 0 and 1 */\n\n            offset = pitch * (j << 4) + (start_i << 4);\n\n            mbnum = j * mbwidth + start_i;\n\n            for (i = start_i; i < mbwidth; i += incr_i)\n            {\n                video->mbNum = mbnum;\n                video->currMB = currMB = mblock + mbnum;\n                mot_mb_16x16 = mot16x16 + mbnum;\n\n                cur = currInput->YCbCr[0] + offset;\n\n                if (currMB->mb_intra == 0) /* for INTER mode */\n                {\n#if defined(HTFM)\n                    HTFMPrepareCurMB_AVC(encvid, &htfm_stat, cur, pitch);\n#else\n                    AVCPrepareCurMB(encvid, cur, pitch);\n#endif\n                    /************************************************************/\n                    /******** full-pel 1MV search **********************/\n\n                    AVCMBMotionSearch(encvid, cur, best_cand, i << 4, j << 4, type_pred,\n                                      FS_en, &hp_guess);\n\n                    abe_cost = encvid->min_cost[mbnum] = mot_mb_16x16->sad;\n\n                    /* set mbMode and MVs */\n                    currMB->mbMode = AVC_P16;\n                    currMB->MBPartPredMode[0][0] = AVC_Pred_L0;\n                    mv_uint32 = ((mot_mb_16x16->y) << 16) | ((mot_mb_16x16->x) & 0xffff);\n                    for (k = 0; k < 32; k += 2)\n                    {\n                        currMB->mvL0[k>>1] = mv_uint32;\n                    }\n\n                    /* make a decision whether it should be tested for intra or not */\n                    if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)\n                    {\n                        if (false == IntraDecisionABE(&abe_cost, cur, pitch, true))\n           
             {\n                            intraSearch[mbnum] = 0;\n                        }\n                        else\n                        {\n                            NumIntraSearch++;\n                            rateCtrl->MADofMB[mbnum] = abe_cost;\n                        }\n                    }\n                    else // boundary MBs, always do intra search\n                    {\n                        NumIntraSearch++;\n                    }\n\n                    totalSAD += (int) rateCtrl->MADofMB[mbnum];//mot_mb_16x16->sad;\n                }\n                else    /* INTRA update, use for prediction */\n                {\n                    mot_mb_16x16[0].x = mot_mb_16x16[0].y = 0;\n\n                    /* reset all other MVs to zero */\n                    /* mot_mb_16x8, mot_mb_8x16, mot_mb_8x8, etc. */\n                    abe_cost = encvid->min_cost[mbnum] = 0x7FFFFFFF;  /* max value for int */\n\n                    if (i != mbwidth - 1 && j != mbheight - 1 && i != 0 && j != 0)\n                    {\n                        IntraDecisionABE(&abe_cost, cur, pitch, false);\n\n                        rateCtrl->MADofMB[mbnum] = abe_cost;\n                        totalSAD += abe_cost;\n                    }\n\n                    NumIntraSearch++ ;\n                    /* cannot do I16 prediction here because it needs full decoding. 
*/\n                    // intraSearch[mbnum] = 1;\n\n                }\n\n                mbnum += incr_i;\n                offset += (incr_i << 4);\n\n            } /* for i */\n        } /* for j */\n\n        /* since we cannot do intra/inter decision here, the SCD has to be\n        based on other criteria such as motion vectors coherency or the SAD */\n        if (incr_i > 1 && numLoop) /* scene change on and first loop */\n        {\n            //if(NumIntraSearch > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */\n            if (NumIntraSearch*99 > (48*totalMB)) /* 20% of 50%MBs */\n                /* need to do more investigation about this threshold since the NumIntraSearch\n                only show potential intra MBs, not the actual one */\n            {\n                /* we can choose to just encode I_SLICE without IDR */\n                //video->nal_unit_type = AVC_NALTYPE_IDR;\n                video->nal_unit_type = AVC_NALTYPE_SLICE;\n                video->sliceHdr->slice_type = AVC_I_ALL_SLICE;\n                video->slice_type = AVC_I_SLICE;\n                oscl_memset(intraSearch, 1, sizeof(uint8)*totalMB);\n                i = totalMB;\n                while (i--)\n                {\n                    mblock[i].mb_intra = 1;\n                    encvid->min_cost[i] = 0x7FFFFFFF;  /* max value for int */\n                }\n\n                rateCtrl->totalSAD = totalSAD * 2;  /* SAD */\n\n                return ;\n            }\n        }\n        /******** no scene change, continue motion search **********************/\n        start_i = 0;\n        type_pred++; /* second pass */\n    }\n\n    rateCtrl->totalSAD = totalSAD;  /* SAD */\n\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/\n    if (collect)\n    {\n        collect = 0;\n        UpdateHTFM(encvid, newvar, exp_lamda, &htfm_stat);\n    }\n    /*********************************/\n#endif\n\n    return 
;\n}\n\n/*=====================================================================\n    Function:   PaddingEdge\n    Date:       09/16/2000\n    Purpose:    Pad edge of a Vop\n=====================================================================*/\n\nvoid  AVCPaddingEdge(AVCPictureData *refPic)\n{\n    uint8 *src, *dst;\n    int i;\n    int pitch, width, height;\n    uint32 temp1, temp2;\n\n    width = refPic->width;\n    height = refPic->height;\n    pitch = refPic->pitch;\n\n    /* pad top */\n    src = refPic->Sl;\n\n    temp1 = *src; /* top-left corner */\n    temp2 = src[width-1]; /* top-right corner */\n    temp1 |= (temp1 << 8);\n    temp1 |= (temp1 << 16);\n    temp2 |= (temp2 << 8);\n    temp2 |= (temp2 << 16);\n\n    dst = src - (pitch << 4);\n\n    *((uint32*)(dst - 16)) = temp1;\n    *((uint32*)(dst - 12)) = temp1;\n    *((uint32*)(dst - 8)) = temp1;\n    *((uint32*)(dst - 4)) = temp1;\n\n    oscl_memcpy(dst, src, width);\n\n    *((uint32*)(dst += width)) = temp2;\n    *((uint32*)(dst + 4)) = temp2;\n    *((uint32*)(dst + 8)) = temp2;\n    *((uint32*)(dst + 12)) = temp2;\n\n    dst = dst - width - 16;\n\n    i = 15;\n    while (i--)\n    {\n        oscl_memcpy(dst + pitch, dst, pitch);\n        dst += pitch;\n    }\n\n    /* pad sides */\n    dst += (pitch + 16);\n    src = dst;\n    i = height;\n    while (i--)\n    {\n        temp1 = *src;\n        temp2 = src[width-1];\n        temp1 |= (temp1 << 8);\n        temp1 |= (temp1 << 16);\n        temp2 |= (temp2 << 8);\n        temp2 |= (temp2 << 16);\n\n        *((uint32*)(dst - 16)) = temp1;\n        *((uint32*)(dst - 12)) = temp1;\n        *((uint32*)(dst - 8)) = temp1;\n        *((uint32*)(dst - 4)) = temp1;\n\n        *((uint32*)(dst += width)) = temp2;\n        *((uint32*)(dst + 4)) = temp2;\n        *((uint32*)(dst + 8)) = temp2;\n        *((uint32*)(dst + 12)) = temp2;\n\n        src += pitch;\n        dst = src;\n    }\n\n    /* pad bottom */\n    dst -= 16;\n    i = 16;\n    while (i--)\n    {\n    
    oscl_memcpy(dst, dst - pitch, pitch);\n        dst += pitch;\n    }\n\n\n    return ;\n}\n\n/*===========================================================================\n    Function:   AVCRasterIntraUpdate\n    Date:       2/26/01\n    Purpose:    To raster-scan assign INTRA-update .\n                N macroblocks are updated (also was programmable).\n===========================================================================*/\nvoid AVCRasterIntraUpdate(AVCEncObject *encvid, AVCMacroblock *mblock, int totalMB, int numRefresh)\n{\n    int indx, i;\n\n    indx = encvid->firstIntraRefreshMBIndx;\n    for (i = 0; i < numRefresh && indx < totalMB; i++)\n    {\n        (mblock + indx)->mb_intra = 1;\n        encvid->intraSearch[indx++] = 1;\n    }\n\n    /* if read the end of frame, reset and loop around */\n    if (indx >= totalMB - 1)\n    {\n        indx = 0;\n        while (i < numRefresh && indx < totalMB)\n        {\n            (mblock + indx)->mb_intra = 1;\n            encvid->intraSearch[indx++] = 1;\n            i++;\n        }\n    }\n\n    encvid->firstIntraRefreshMBIndx = indx; /* update with a new value */\n\n    return ;\n}\n\n\n#ifdef HTFM\nvoid InitHTFM(VideoEncData *encvid, HTFM_Stat *htfm_stat, double *newvar, int *collect)\n{\n    AVCCommonObj *video = encvid->common;\n    int i;\n    int lx = video->currPic->width; // padding\n    int lx2 = lx << 1;\n    int lx3 = lx2 + lx;\n    int rx = video->currPic->pitch;\n    int rx2 = rx << 1;\n    int rx3 = rx2 + rx;\n\n    int *offset, *offset2;\n\n    /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */\n    if (((int)video->sliceHdr->frame_num) % 30 == 1)\n    {\n\n        *collect = 1;\n\n        htfm_stat->countbreak = 0;\n        htfm_stat->abs_dif_mad_avg = 0;\n\n        for (i = 0; i < 16; i++)\n        {\n            newvar[i] = 0.0;\n        }\n//      encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;\n        
encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;\n        encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;\n        encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;\n        encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;\n        encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;\n        encvid->sad_extra_info = (void*)(htfm_stat);\n        offset = htfm_stat->offsetArray;\n        offset2 = htfm_stat->offsetRef;\n    }\n    else\n    {\n//      encvid->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;\n        encvid->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;\n        encvid->functionPointer->SAD_MB_HalfPel[0] = NULL;\n        encvid->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;\n        encvid->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;\n        encvid->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;\n        encvid->sad_extra_info = (void*)(encvid->nrmlz_th);\n        offset = encvid->nrmlz_th + 16;\n        offset2 = encvid->nrmlz_th + 32;\n    }\n\n    offset[0] = 0;\n    offset[1] = lx2 + 2;\n    offset[2] = 2;\n    offset[3] = lx2;\n    offset[4] = lx + 1;\n    offset[5] = lx3 + 3;\n    offset[6] = lx + 3;\n    offset[7] = lx3 + 1;\n    offset[8] = lx;\n    offset[9] = lx3 + 2;\n    offset[10] = lx3 ;\n    offset[11] = lx + 2 ;\n    offset[12] = 1;\n    offset[13] = lx2 + 3;\n    offset[14] = lx2 + 1;\n    offset[15] = 3;\n\n    offset2[0] = 0;\n    offset2[1] = rx2 + 2;\n    offset2[2] = 2;\n    offset2[3] = rx2;\n    offset2[4] = rx + 1;\n    offset2[5] = rx3 + 3;\n    offset2[6] = rx + 3;\n    offset2[7] = rx3 + 1;\n    offset2[8] = rx;\n    offset2[9] = rx3 + 2;\n    offset2[10] = rx3 ;\n    offset2[11] = rx + 2 ;\n    offset2[12] = 1;\n    offset2[13] = rx2 + 3;\n    offset2[14] = rx2 + 1;\n    offset2[15] = 3;\n\n    return ;\n}\n\nvoid UpdateHTFM(AVCEncObject *encvid, double *newvar, double *exp_lamda, HTFM_Stat 
*htfm_stat)\n{\n    if (htfm_stat->countbreak == 0)\n        htfm_stat->countbreak = 1;\n\n    newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);\n\n    if (newvar[0] < 0.001)\n    {\n        newvar[0] = 0.001; /* to prevent floating overflow */\n    }\n    exp_lamda[0] =  1 / (newvar[0] * 1.4142136);\n    exp_lamda[1] = exp_lamda[0] * 1.5825;\n    exp_lamda[2] = exp_lamda[0] * 2.1750;\n    exp_lamda[3] = exp_lamda[0] * 3.5065;\n    exp_lamda[4] = exp_lamda[0] * 3.1436;\n    exp_lamda[5] = exp_lamda[0] * 3.5315;\n    exp_lamda[6] = exp_lamda[0] * 3.7449;\n    exp_lamda[7] = exp_lamda[0] * 4.5854;\n    exp_lamda[8] = exp_lamda[0] * 4.6191;\n    exp_lamda[9] = exp_lamda[0] * 5.4041;\n    exp_lamda[10] = exp_lamda[0] * 6.5974;\n    exp_lamda[11] = exp_lamda[0] * 10.5341;\n    exp_lamda[12] = exp_lamda[0] * 10.0719;\n    exp_lamda[13] = exp_lamda[0] * 12.0516;\n    exp_lamda[14] = exp_lamda[0] * 15.4552;\n\n    CalcThreshold(HTFM_Pf, exp_lamda, encvid->nrmlz_th);\n    return ;\n}\n\n\nvoid CalcThreshold(double pf, double exp_lamda[], int nrmlz_th[])\n{\n    int i;\n    double temp[15];\n    //  printf(\"\\nLamda: \");\n\n    /* parametric PREMODELling */\n    for (i = 0; i < 15; i++)\n    {\n        //    printf(\"%g \",exp_lamda[i]);\n        if (pf < 0.5)\n            temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);\n        else\n            temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));\n    }\n\n    nrmlz_th[15] = 0;\n    for (i = 0; i < 15; i++)        /* scale upto no.pixels */\n        nrmlz_th[i] = (int)(temp[i] * ((i + 1) << 4) + 0.5);\n\n    return ;\n}\n\nvoid    HTFMPrepareCurMB_AVC(AVCEncObject *encvid, HTFM_Stat *htfm_stat, uint8 *cur, int pitch)\n{\n    AVCCommonObj *video = encvid->common;\n    uint32 *htfmMB = (uint32*)(encvid->currYMB);\n    uint8 *ptr, byte;\n    int *offset;\n    int i;\n    uint32 word;\n\n    if (((int)video->sliceHdr->frame_num) % 30 == 1)\n    {\n        offset = htfm_stat->offsetArray;\n 
   }\n    else\n    {\n        offset = encvid->nrmlz_th + 16;\n    }\n\n    for (i = 0; i < 16; i++)\n    {\n        ptr = cur + offset[i];\n        word = ptr[0];\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (pitch << 2));\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (pitch << 2));\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (pitch << 2));\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n    }\n\n    return ;\n}\n\n\n#endif // HTFM\n\nvoid    AVCPrepareCurMB(AVCEncObject *encvid, uint8 *cur, int pitch)\n{\n    void* tmp = (void*)(encvid->currYMB);\n    uint32 *currYMB = (uint32*) tmp;\n    int i;\n\n    cur -= pitch;\n\n    for (i = 0; i < 16; i++)\n    {\n        *currYMB++ = *((uint32*)(cur += pitch));\n        *currYMB++ = *((uint32*)(cur + 4));\n        *currYMB++ = *((uint32*)(cur + 8));\n        *currYMB++ = *((uint32*)(cur + 12));\n    }\n\n    return ;\n}\n\n#ifdef FIXED_INTERPRED_MODE\n\n/* due to the complexity of the predicted motion vector, we may not decide to skip\na macroblock here just yet. */\n/* We will find the best motion vector and the best intra prediction mode for each block. 
*/\n/* output are\n    currMB->NumMbPart,  currMB->MbPartWidth, currMB->MbPartHeight,\n    currMB->NumSubMbPart[], currMB->SubMbPartWidth[], currMB->SubMbPartHeight,\n    currMB->MBPartPredMode[][] (L0 or L1 or BiPred)\n    currMB->RefIdx[], currMB->ref_idx_L0[],\n    currMB->mvL0[], currMB->mvL1[]\n    */\n\nAVCEnc_Status AVCMBMotionSearch(AVCEncObject *encvid, AVCMacroblock *currMB, int mbNum,\n                                int num_pass)\n{\n    AVCCommonObj *video = encvid->common;\n    int mbPartIdx, subMbPartIdx;\n    int16 *mv;\n    int i;\n    int SubMbPartHeight, SubMbPartWidth, NumSubMbPart;\n\n    /* assign value to currMB->MBPartPredMode[][x],subMbMode[],NumSubMbPart[],SubMbPartWidth[],SubMbPartHeight[] */\n\n    currMB->mbMode = FIXED_INTERPRED_MODE;\n    currMB->mb_intra = 0;\n\n    if (currMB->mbMode == AVC_P16)\n    {\n        currMB->NumMbPart = 1;\n        currMB->MbPartWidth = 16;\n        currMB->MbPartHeight = 16;\n        currMB->SubMbPartHeight[0] = 16;\n        currMB->SubMbPartWidth[0] = 16;\n        currMB->NumSubMbPart[0] =  1;\n    }\n    else if (currMB->mbMode == AVC_P16x8)\n    {\n        currMB->NumMbPart = 2;\n        currMB->MbPartWidth = 16;\n        currMB->MbPartHeight = 8;\n        for (i = 0; i < 2; i++)\n        {\n            currMB->SubMbPartWidth[i] = 16;\n            currMB->SubMbPartHeight[i] = 8;\n            currMB->NumSubMbPart[i] = 1;\n        }\n    }\n    else if (currMB->mbMode == AVC_P8x16)\n    {\n        currMB->NumMbPart = 2;\n        currMB->MbPartWidth = 8;\n        currMB->MbPartHeight = 16;\n        for (i = 0; i < 2; i++)\n        {\n            currMB->SubMbPartWidth[i] = 8;\n            currMB->SubMbPartHeight[i] = 16;\n            currMB->NumSubMbPart[i] = 1;\n        }\n    }\n    else if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)\n    {\n        currMB->NumMbPart = 4;\n        currMB->MbPartWidth = 8;\n        currMB->MbPartHeight = 8;\n        if (FIXED_SUBMB_MODE == AVC_8x8)\n      
  {\n            SubMbPartHeight = 8;\n            SubMbPartWidth = 8;\n            NumSubMbPart = 1;\n        }\n        else if (FIXED_SUBMB_MODE == AVC_8x4)\n        {\n            SubMbPartHeight = 4;\n            SubMbPartWidth = 8;\n            NumSubMbPart = 2;\n        }\n        else if (FIXED_SUBMB_MODE == AVC_4x8)\n        {\n            SubMbPartHeight = 8;\n            SubMbPartWidth = 4;\n            NumSubMbPart = 2;\n        }\n        else if (FIXED_SUBMB_MODE == AVC_4x4)\n        {\n            SubMbPartHeight = 4;\n            SubMbPartWidth = 4;\n            NumSubMbPart = 4;\n        }\n\n        for (i = 0; i < 4; i++)\n        {\n            currMB->subMbMode[i] = FIXED_SUBMB_MODE;\n            currMB->SubMbPartHeight[i] = SubMbPartHeight;\n            currMB->SubMbPartWidth[i] = SubMbPartWidth;\n            currMB->NumSubMbPart[i] = NumSubMbPart;\n        }\n    }\n    else /* it's probably intra mode */\n    {\n        return AVCENC_SUCCESS;\n    }\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        currMB->MBPartPredMode[mbPartIdx][0]  = AVC_Pred_L0;\n        currMB->ref_idx_L0[mbPartIdx] = FIXED_REF_IDX;\n        currMB->RefIdx[mbPartIdx] = video->RefPicList0[FIXED_REF_IDX]->RefIdx;\n\n        for (subMbPartIdx = 0; subMbPartIdx < 4; subMbPartIdx++)\n        {\n            mv = (int16*)(currMB->mvL0 + (mbPartIdx << 2) + subMbPartIdx);\n\n            *mv++ = FIXED_MVX;\n            *mv = FIXED_MVY;\n        }\n    }\n\n    encvid->min_cost = 0;\n\n    return AVCENC_SUCCESS;\n}\n\n#else /* perform the search */\n\n/* This option #1 search is very similar to PV's MPEG4 motion search algorithm.\n  The search is done in hierarchical manner from 16x16 MB down to smaller and smaller\n  partition. At each level, a decision can be made to stop the search if the expected\n  prediction gain is not worth the computation. 
The decision can also be made at the finest\n  level for more fullsearch-like behavior with the price of heavier computation. */\nvoid AVCMBMotionSearch(AVCEncObject *encvid, uint8 *cur, uint8 *best_cand[],\n                       int i0, int j0, int type_pred, int FS_en, int *hp_guess)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCPictureData *currPic = video->currPic;\n    AVCSeqParamSet *currSPS = video->currSeqParams;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCMacroblock *currMB = video->currMB;\n    uint8 *ref, *cand, *ncand;\n    void *extra_info = encvid->sad_extra_info;\n    int mbnum = video->mbNum;\n    int width = currPic->width; /* 6/12/01, must be multiple of 16 */\n    int height = currPic->height;\n    AVCMV *mot16x16 = encvid->mot16x16;\n    int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;\n\n    int range = rateCtrl->mvRange;\n\n    int lx = currPic->pitch; /*  padding */\n    int i, j, imin, jmin, ilow, ihigh, jlow, jhigh;\n    int d, dmin, dn[9];\n    int k;\n    int mvx[5], mvy[5];\n    int num_can, center_again;\n    int last_loc, new_loc = 0;\n    int step, max_step = range >> 1;\n    int next;\n\n    int cmvx, cmvy; /* estimated predicted MV */\n    int lev_idx;\n    int lambda_motion = encvid->lambda_motion;\n    uint8 *mvbits = encvid->mvbits;\n    int mvshift = 2;\n    int mvcost;\n\n    int min_sad = 65535;\n\n    ref = video->RefPicList0[DEFAULT_REF_IDX]->Sl; /* origin of actual frame */\n\n    /* have to initialize these params, necessary for interprediction part */\n    currMB->NumMbPart = 1;\n    currMB->SubMbPartHeight[0] = 16;\n    currMB->SubMbPartWidth[0] = 16;\n    currMB->NumSubMbPart[0] = 1;\n    currMB->ref_idx_L0[0] = currMB->ref_idx_L0[1] =\n                                currMB->ref_idx_L0[2] = currMB->ref_idx_L0[3] = DEFAULT_REF_IDX;\n    currMB->ref_idx_L1[0] = currMB->ref_idx_L1[1] =\n                                currMB->ref_idx_L1[2] = 
currMB->ref_idx_L1[3] = DEFAULT_REF_IDX;\n    currMB->RefIdx[0] = currMB->RefIdx[1] =\n                            currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[DEFAULT_REF_IDX]->RefIdx;\n\n    cur = encvid->currYMB; /* use smaller memory space for current MB */\n\n    /*  find limit of the search (adjusting search range)*/\n    lev_idx = mapLev2Idx[currSPS->level_idc];\n\n    /* we can make this part dynamic based on previous statistics */\n    ilow = i0 - range;\n    if (i0 - ilow > 2047) /* clip to conform with the standard */\n    {\n        ilow = i0 - 2047;\n    }\n    if (ilow < -13)  // change it from -15 to -13 because of 6-tap filter needs extra 2 lines.\n    {\n        ilow = -13;\n    }\n\n    ihigh = i0 + range - 1;\n    if (ihigh - i0 > 2047) /* clip to conform with the standard */\n    {\n        ihigh = i0 + 2047;\n    }\n    if (ihigh > width - 3)\n    {\n        ihigh = width - 3;  // change from width-1 to width-3 for the same reason as above\n    }\n\n    jlow = j0 - range;\n    if (j0 - jlow > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */\n    {\n        jlow = j0 - MaxVmvR[lev_idx] + 1;\n    }\n    if (jlow < -13)     // same reason as above\n    {\n        jlow = -13;\n    }\n\n    jhigh = j0 + range - 1;\n    if (jhigh - j0 > MaxVmvR[lev_idx] - 1) /* clip to conform with the standard */\n    {\n        jhigh = j0 + MaxVmvR[lev_idx] - 1;\n    }\n    if (jhigh > height - 3) // same reason as above\n    {\n        jhigh = height - 3;\n    }\n\n    /* find initial motion vector & predicted MV*/\n    AVCCandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, encvid, type_pred, &cmvx, &cmvy);\n\n    imin = i0;\n    jmin = j0; /* needed for fullsearch */\n    ncand = ref + i0 + j0 * lx;\n\n    /* for first row of MB, fullsearch can be used */\n    if (FS_en)\n    {\n        *hp_guess = 0; /* no guess for fast half-pel */\n\n        dmin =  AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, 
cmvy);\n\n        ncand = ref + imin + jmin * lx;\n    }\n    else\n    {   /*       fullsearch the top row to only upto (0,3) MB */\n        /*       upto 30% complexity saving with the same complexity */\n        if (video->PrevRefFrameNum == 0 && j0 == 0 && i0 <= 64 && type_pred != 1)\n        {\n            *hp_guess = 0; /* no guess for fast half-pel */\n            dmin =  AVCFullSearch(encvid, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh, cmvx, cmvy);\n            ncand = ref + imin + jmin * lx;\n        }\n        else\n        {\n            /************** initialize candidate **************************/\n\n            dmin = 65535;\n\n            /* check if all are equal */\n            if (num_can == ALL_CAND_EQUAL)\n            {\n                i = i0 + mvx[0];\n                j = j0 + mvy[0];\n\n                if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                {\n                    cand = ref + i + j * lx;\n\n                    d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);\n                    mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);\n                    d +=  mvcost;\n\n                    if (d < dmin)\n                    {\n                        dmin = d;\n                        imin = i;\n                        jmin = j;\n                        ncand = cand;\n                        min_sad = d - mvcost; // for rate control\n                    }\n                }\n            }\n            else\n            {\n                /************** evaluate unique candidates **********************/\n                for (k = 0; k < num_can; k++)\n                {\n                    i = i0 + mvx[k];\n                    j = j0 + mvy[k];\n\n                    if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                    {\n                        cand = ref + i + j * lx;\n                        d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, 
extra_info);\n                        mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);\n                        d +=  mvcost;\n\n                        if (d < dmin)\n                        {\n                            dmin = d;\n                            imin = i;\n                            jmin = j;\n                            ncand = cand;\n                            min_sad = d - mvcost; // for rate control\n                        }\n                    }\n                }\n            }\n\n            /******************* local refinement ***************************/\n            center_again = 0;\n            last_loc = new_loc = 0;\n            //          ncand = ref + jmin*lx + imin;  /* center of the search */\n            step = 0;\n            dn[0] = dmin;\n            while (!center_again && step <= max_step)\n            {\n\n                AVCMoveNeighborSAD(dn, last_loc);\n\n                center_again = 1;\n                i = imin;\n                j = jmin - 1;\n                cand = ref + i + j * lx;\n\n                /*  starting from [0,-1] */\n                /* spiral check one step at a time*/\n                for (k = 2; k <= 8; k += 2)\n                {\n                    if (!tab_exclude[last_loc][k]) /* exclude last step computation */\n                    {       /* not already computed */\n                        if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                        {\n                            d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);\n                            mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);\n                            d += mvcost;\n\n                            dn[k] = d; /* keep it for half pel use */\n\n                            if (d < dmin)\n                            {\n                                ncand = cand;\n                                dmin = d;\n                                
imin = i;\n                                jmin = j;\n                                center_again = 0;\n                                new_loc = k;\n                                min_sad = d - mvcost; // for rate control\n                            }\n                        }\n                    }\n                    if (k == 8)  /* end side search*/\n                    {\n                        if (!center_again)\n                        {\n                            k = -1; /* start diagonal search */\n                            cand -= lx;\n                            j--;\n                        }\n                    }\n                    else\n                    {\n                        next = refine_next[k][0];\n                        i += next;\n                        cand += next;\n                        next = refine_next[k][1];\n                        j += next;\n                        cand += lx * next;\n                    }\n                }\n                last_loc = new_loc;\n                step ++;\n            }\n            if (!center_again)\n                AVCMoveNeighborSAD(dn, last_loc);\n\n            *hp_guess = AVCFindMin(dn);\n\n            encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;\n        }\n    }\n\n    mot16x16[mbnum].sad = dmin;\n    mot16x16[mbnum].x = (imin - i0) << 2;\n    mot16x16[mbnum].y = (jmin - j0) << 2;\n    best_cand[0] = ncand;\n\n    if (rateCtrl->subPelEnable) // always enable half-pel search\n    {\n        /* find half-pel resolution motion vector */\n        min_sad = AVCFindHalfPelMB(encvid, cur, mot16x16 + mbnum, best_cand[0], i0, j0, *hp_guess, cmvx, cmvy);\n\n        encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;\n\n\n        if (encvid->best_qpel_pos == -1)\n        {\n            ncand = encvid->hpel_cand[encvid->best_hpel_pos];\n        }\n        else\n        {\n            ncand = encvid->qpel_cand[encvid->best_qpel_pos];\n        }\n    }\n    else\n    {\n        
encvid->rateCtrl->MADofMB[mbnum] = min_sad / 256.0;\n    }\n\n    /** do motion comp here for now */\n    ref = currPic->Sl + i0 + j0 * lx;\n    /* copy from the best result to current Picture */\n    for (j = 0; j < 16; j++)\n    {\n        for (i = 0; i < 16; i++)\n        {\n            *ref++ = *ncand++;\n        }\n        ref += (lx - 16);\n        ncand += 8;\n    }\n\n    return ;\n}\n\n#endif\n\n/*===============================================================================\n    Function:   AVCFullSearch\n    Date:       09/16/2000\n    Purpose:    Perform full-search motion estimation over the range of search\n                region in a spiral-outward manner.\n    Input/Output:   VideoEncData, current Vol, previou Vop, pointer to the left corner of\n                current VOP, current coord (also output), boundaries.\n===============================================================================*/\nint AVCFullSearch(AVCEncObject *encvid, uint8 *prev, uint8 *cur,\n                  int *imin, int *jmin, int ilow, int ihigh, int jlow, int jhigh,\n                  int cmvx, int cmvy)\n{\n    int range = encvid->rateCtrl->mvRange;\n    AVCPictureData *currPic = encvid->common->currPic;\n    uint8 *cand;\n    int i, j, k, l;\n    int d, dmin;\n    int i0 = *imin; /* current position */\n    int j0 = *jmin;\n    int (*SAD_Macroblock)(uint8*, uint8*, int, void*) = encvid->functionPointer->SAD_Macroblock;\n    void *extra_info = encvid->sad_extra_info;\n    int lx = currPic->pitch; /* with padding */\n\n    int offset = i0 + j0 * lx;\n\n    int lambda_motion = encvid->lambda_motion;\n    uint8 *mvbits = encvid->mvbits;\n    int mvshift = 2;\n    int mvcost;\n    int min_sad;\n\n    cand = prev + offset;\n\n    dmin  = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info);\n    mvcost = MV_COST(lambda_motion, mvshift, 0, 0, cmvx, cmvy);\n    min_sad = dmin;\n    dmin += mvcost;\n\n    /* perform spiral search */\n    for (k = 1; k <= range; 
k++)\n    {\n\n        i = i0 - k;\n        j = j0 - k;\n\n        cand = prev + i + j * lx;\n\n        for (l = 0; l < 8*k; l++)\n        {\n            /* no need for boundary checking again */\n            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n            {\n                d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info);\n                mvcost = MV_COST(lambda_motion, mvshift, i - i0, j - j0, cmvx, cmvy);\n                d +=  mvcost;\n\n                if (d < dmin)\n                {\n                    dmin = d;\n                    *imin = i;\n                    *jmin = j;\n                    min_sad = d - mvcost;\n                }\n            }\n\n            if (l < (k << 1))\n            {\n                i++;\n                cand++;\n            }\n            else if (l < (k << 2))\n            {\n                j++;\n                cand += lx;\n            }\n            else if (l < ((k << 2) + (k << 1)))\n            {\n                i--;\n                cand--;\n            }\n            else\n            {\n                j--;\n                cand -= lx;\n            }\n        }\n    }\n\n    encvid->rateCtrl->MADofMB[encvid->common->mbNum] = (min_sad / 256.0); // for rate control\n\n    return dmin;\n}\n\n/*===============================================================================\n    Function:   AVCCandidateSelection\n    Date:       09/16/2000\n    Purpose:    Fill up the list of candidate using spatio-temporal correlation\n                among neighboring blocks.\n    Input/Output:   type_pred = 0: first pass, 1: second pass, or no SCD\n    Modified:   , 09/23/01, get rid of redundant candidates before passing back.\n                , 09/11/07, added return for modified predicted MV, this will be\n                    needed for both fast search and fullsearch.\n===============================================================================*/\n\nvoid AVCCandidateSelection(int 
*mvx, int *mvy, int *num_can, int imb, int jmb,\n                           AVCEncObject *encvid, int type_pred, int *cmvx, int *cmvy)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCMV *mot16x16 = encvid->mot16x16;\n    AVCMV *pmot;\n    int mbnum = video->mbNum;\n    int mbwidth = video->PicWidthInMbs;\n    int mbheight = video->PicHeightInMbs;\n    int i, j, same, num1;\n\n    /* this part is for predicted MV */\n    int pmvA_x = 0, pmvA_y = 0, pmvB_x = 0, pmvB_y = 0, pmvC_x = 0, pmvC_y = 0;\n    int availA = 0, availB = 0, availC = 0;\n\n    *num_can = 0;\n\n    if (video->PrevRefFrameNum != 0) // previous frame is an IDR frame\n    {\n        /* Spatio-Temporal Candidate (five candidates) */\n        if (type_pred == 0) /* first pass */\n        {\n            pmot = &mot16x16[mbnum]; /* same coordinate previous frame */\n            mvx[(*num_can)] = (pmot->x) >> 2;\n            mvy[(*num_can)++] = (pmot->y) >> 2;\n            if (imb >= (mbwidth >> 1) && imb > 0)  /*left neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum-1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            else if (imb + 1 < mbwidth)   /*right neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum+1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n\n            if (jmb < mbheight - 1)  /*bottom neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum+mbwidth];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            else if (jmb > 0)   /*upper neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum-mbwidth];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n\n            if (imb > 0 
&& jmb > 0)  /* upper-left neighbor current frame*/\n            {\n                pmot = &mot16x16[mbnum-mbwidth-1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (jmb > 0 && imb < mbheight - 1)  /* upper right neighbor current frame*/\n            {\n                pmot = &mot16x16[mbnum-mbwidth+1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n        }\n        else    /* second pass */\n            /* original ST1 algorithm */\n        {\n            pmot = &mot16x16[mbnum]; /* same coordinate previous frame */\n            mvx[(*num_can)] = (pmot->x) >> 2;\n            mvy[(*num_can)++] = (pmot->y) >> 2;\n\n            if (imb > 0)  /*left neighbor current frame */\n            {\n                pmot = &mot16x16[mbnum-1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (jmb > 0)  /*upper neighbor current frame */\n            {\n                pmot = &mot16x16[mbnum-mbwidth];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (imb < mbwidth - 1)  /*right neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum+1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (jmb < mbheight - 1)  /*bottom neighbor previous frame */\n            {\n                pmot = &mot16x16[mbnum+mbwidth];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n        }\n\n        /* get predicted MV */\n        if (imb > 0)    /* get MV from left (A) neighbor either on current or previous frame */\n        {\n            availA = 1;\n            pmot = &mot16x16[mbnum-1];\n     
       pmvA_x = pmot->x;\n            pmvA_y = pmot->y;\n        }\n\n        if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */\n        {\n            availB = 1;\n            pmot = &mot16x16[mbnum-mbwidth];\n            pmvB_x = pmot->x;\n            pmvB_y = pmot->y;\n\n            availC = 1;\n\n            if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */\n            {\n                pmot = &mot16x16[mbnum-mbwidth+1];\n            }\n            else /* get MV from top-left (D) neighbor of current frame */\n            {\n                pmot = &mot16x16[mbnum-mbwidth-1];\n            }\n            pmvC_x = pmot->x;\n            pmvC_y = pmot->y;\n        }\n\n    }\n    else  /* only Spatial Candidate (four candidates)*/\n    {\n        if (type_pred == 0) /*first pass*/\n        {\n            if (imb > 1)  /* neighbor two blocks away to the left */\n            {\n                pmot = &mot16x16[mbnum-2];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (imb > 0 && jmb > 0)  /* upper-left neighbor */\n            {\n                pmot = &mot16x16[mbnum-mbwidth-1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n            if (jmb > 0 && imb < mbheight - 1)  /* upper right neighbor */\n            {\n                pmot = &mot16x16[mbnum-mbwidth+1];\n                mvx[(*num_can)] = (pmot->x) >> 2;\n                mvy[(*num_can)++] = (pmot->y) >> 2;\n            }\n\n            /* get predicted MV */\n            if (imb > 1)    /* get MV from 2nd left (A) neighbor either of current frame */\n            {\n                availA = 1;\n                pmot = &mot16x16[mbnum-2];\n                pmvA_x = pmot->x;\n                pmvA_y = pmot->y;\n            }\n\n            if (jmb > 0 && imb > 0) /* get MV from top-left (B) 
neighbor of current frame */\n            {\n                availB = 1;\n                pmot = &mot16x16[mbnum-mbwidth-1];\n                pmvB_x = pmot->x;\n                pmvB_y = pmot->y;\n            }\n\n            if (jmb > 0 && imb < mbwidth - 1)\n            {\n                availC = 1;\n                pmot = &mot16x16[mbnum-mbwidth+1];\n                pmvC_x = pmot->x;\n                pmvC_y = pmot->y;\n            }\n        }\n//#ifdef SCENE_CHANGE_DETECTION\n        /* second pass (ST2 algorithm)*/\n        else\n        {\n            if (type_pred == 1) /*  4/7/01 */\n            {\n                if (imb > 0)  /*left neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum-1];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                    mvy[(*num_can)++] = (pmot->y) >> 2;\n                }\n                if (jmb > 0)  /*upper neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum-mbwidth];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                    mvy[(*num_can)++] = (pmot->y) >> 2;\n                }\n                if (imb < mbwidth - 1)  /*right neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum+1];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                    mvy[(*num_can)++] = (pmot->y) >> 2;\n                }\n                if (jmb < mbheight - 1)  /*bottom neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum+mbwidth];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                    mvy[(*num_can)++] = (pmot->y) >> 2;\n                }\n            }\n            //#else\n            else /* original ST1 algorithm */\n            {\n                if (imb > 0)  /*left neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum-1];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                 
   mvy[(*num_can)++] = (pmot->y) >> 2;\n\n                    if (jmb > 0)  /*upper-left neighbor current frame */\n                    {\n                        pmot = &mot16x16[mbnum-mbwidth-1];\n                        mvx[(*num_can)] = (pmot->x) >> 2;\n                        mvy[(*num_can)++] = (pmot->y) >> 2;\n                    }\n\n                }\n                if (jmb > 0)  /*upper neighbor current frame */\n                {\n                    pmot = &mot16x16[mbnum-mbwidth];\n                    mvx[(*num_can)] = (pmot->x) >> 2;\n                    mvy[(*num_can)++] = (pmot->y) >> 2;\n\n                    if (imb < mbheight - 1)  /*upper-right neighbor current frame */\n                    {\n                        pmot = &mot16x16[mbnum-mbwidth+1];\n                        mvx[(*num_can)] = (pmot->x) >> 2;\n                        mvy[(*num_can)++] = (pmot->y) >> 2;\n                    }\n                }\n            }\n\n            /* get predicted MV */\n            if (imb > 0)    /* get MV from left (A) neighbor either on current or previous frame */\n            {\n                availA = 1;\n                pmot = &mot16x16[mbnum-1];\n                pmvA_x = pmot->x;\n                pmvA_y = pmot->y;\n            }\n\n            if (jmb > 0) /* get MV from top (B) neighbor either on current or previous frame */\n            {\n                availB = 1;\n                pmot = &mot16x16[mbnum-mbwidth];\n                pmvB_x = pmot->x;\n                pmvB_y = pmot->y;\n\n                availC = 1;\n\n                if (imb < mbwidth - 1) /* get MV from top-right (C) neighbor of current frame */\n                {\n                    pmot = &mot16x16[mbnum-mbwidth+1];\n                }\n                else /* get MV from top-left (D) neighbor of current frame */\n                {\n                    pmot = &mot16x16[mbnum-mbwidth-1];\n                }\n                pmvC_x = pmot->x;\n                pmvC_y = 
pmot->y;\n            }\n        }\n//#endif\n    }\n\n    /*  3/23/01, remove redundant candidate (possible k-mean) */\n    num1 = *num_can;\n    *num_can = 1;\n    for (i = 1; i < num1; i++)\n    {\n        same = 0;\n        j = 0;\n        while (!same && j < *num_can)\n        {\n#if (CANDIDATE_DISTANCE==0)\n            if (mvx[i] == mvx[j] && mvy[i] == mvy[j])\n#else\n            // modified k-mean,  3/24/01, shouldn't be greater than 3\n            if (AVC_ABS(mvx[i] - mvx[j]) + AVC_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE)\n#endif\n                same = 1;\n            j++;\n        }\n        if (!same)\n        {\n            mvx[*num_can] = mvx[i];\n            mvy[*num_can] = mvy[i];\n            (*num_can)++;\n        }\n    }\n\n    if (num1 == 5 && *num_can == 1)\n        *num_can = ALL_CAND_EQUAL; /* all are equal */\n\n    /* calculate predicted MV */\n\n    if (availA && !(availB || availC))\n    {\n        *cmvx = pmvA_x;\n        *cmvy = pmvA_y;\n    }\n    else\n    {\n        *cmvx = AVC_MEDIAN(pmvA_x, pmvB_x, pmvC_x);\n        *cmvy = AVC_MEDIAN(pmvA_y, pmvB_y, pmvC_y);\n    }\n\n    return ;\n}\n\n\n/*************************************************************\n    Function:   AVCMoveNeighborSAD\n    Date:       3/27/01\n    Purpose:    Move neighboring SAD around when center has shifted\n*************************************************************/\n\nvoid AVCMoveNeighborSAD(int dn[], int new_loc)\n{\n    int tmp[9];\n    tmp[0] = dn[0];\n    tmp[1] = dn[1];\n    tmp[2] = dn[2];\n    tmp[3] = dn[3];\n    tmp[4] = dn[4];\n    tmp[5] = dn[5];\n    tmp[6] = dn[6];\n    tmp[7] = dn[7];\n    tmp[8] = dn[8];\n    dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536;\n\n    switch (new_loc)\n    {\n        case 0:\n            break;\n        case 1:\n            dn[4] = tmp[2];\n            dn[5] = tmp[0];\n            dn[6] = tmp[8];\n            break;\n        case 2:\n            dn[4] = tmp[3];\n            dn[5] 
= tmp[4];\n            dn[6] = tmp[0];\n            dn[7] = tmp[8];\n            dn[8] = tmp[1];\n            break;\n        case 3:\n            dn[6] = tmp[4];\n            dn[7] = tmp[0];\n            dn[8] = tmp[2];\n            break;\n        case 4:\n            dn[1] = tmp[2];\n            dn[2] = tmp[3];\n            dn[6] = tmp[5];\n            dn[7] = tmp[6];\n            dn[8] = tmp[0];\n            break;\n        case 5:\n            dn[1] = tmp[0];\n            dn[2] = tmp[4];\n            dn[8] = tmp[6];\n            break;\n        case 6:\n            dn[1] = tmp[8];\n            dn[2] = tmp[0];\n            dn[3] = tmp[4];\n            dn[4] = tmp[5];\n            dn[8] = tmp[7];\n            break;\n        case 7:\n            dn[2] = tmp[8];\n            dn[3] = tmp[0];\n            dn[4] = tmp[6];\n            break;\n        case 8:\n            dn[2] = tmp[1];\n            dn[3] = tmp[2];\n            dn[4] = tmp[0];\n            dn[5] = tmp[6];\n            dn[6] = tmp[7];\n            break;\n    }\n    dn[0] = tmp[new_loc];\n\n    return ;\n}\n\n/*  3/28/01, find minimal of dn[9] */\n\nint AVCFindMin(int dn[])\n{\n    int min, i;\n    int dmin;\n\n    dmin = dn[1];\n    min = 1;\n    for (i = 2; i < 9; i++)\n    {\n        if (dn[i] < dmin)\n        {\n            dmin = dn[i];\n            min = i;\n        }\n    }\n\n    return min;\n}\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/pvavcencoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"pvavcencoder.h\"\n#include \"oscl_mem.h\"\n\n// xxx pa\n#define LOG_TAG \"pvaencoder\"\n#include \"android/log.h\"\n\n\n/* global static functions */\n\nvoid CbAvcEncDebugLog(uint32 *userData, AVCLogType type, char *string1, int val1, int val2)\n{\n    OSCL_UNUSED_ARG(userData);\n    OSCL_UNUSED_ARG(type);\n    OSCL_UNUSED_ARG(string1);\n    OSCL_UNUSED_ARG(val1);\n    OSCL_UNUSED_ARG(val2);\n\n    return ;\n}\n\nint CbAvcEncMalloc(void *userData, int32 size, int attribute)\n{\n    OSCL_UNUSED_ARG(userData);\n    OSCL_UNUSED_ARG(attribute);\n\n    uint8 *mem;\n\n    mem = (uint8*) oscl_malloc(size);\n\n    return (int)mem;\n}\n\nvoid CbAvcEncFree(void *userData, int mem)\n{\n    OSCL_UNUSED_ARG(userData);\n\n    oscl_free((void*)mem);\n\n    return ;\n}\n\nint CbAvcEncDPBAlloc(void *userData, uint frame_size_in_mbs, uint num_buffers)\n{\n    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;\n\n    return pAvcEnc->AVC_DPBAlloc(frame_size_in_mbs, num_buffers);\n}\n\nvoid CbAvcEncFrameUnbind(void *userData, int indx)\n{\n    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;\n\n    pAvcEnc->AVC_FrameUnbind(indx);\n\n    return ;\n}\n\nint CbAvcEncFrameBind(void *userData, int indx, uint8 
**yuv)\n{\n    PVAVCEncoder *pAvcEnc = (PVAVCEncoder*) userData;\n\n    return pAvcEnc->AVC_FrameBind(indx, yuv);\n}\n\n\n\n/* ///////////////////////////////////////////////////////////////////////// */\nPVAVCEncoder::PVAVCEncoder()\n{\n\n//iEncoderControl\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF PVAVCEncoder::~PVAVCEncoder()\n{\n    CleanupEncoder();\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF PVAVCEncoder* PVAVCEncoder::New()\n{\n    PVAVCEncoder* self = new PVAVCEncoder;\n    if (self && self->Construct())\n        return self;\n    if (self)\n        delete self;\n    return NULL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nbool PVAVCEncoder::Construct()\n{\n    oscl_memset((void *)&iAvcHandle, 0, sizeof(AVCHandle));\n\n    iAvcHandle.CBAVC_DPBAlloc = &CbAvcEncDPBAlloc;\n    iAvcHandle.CBAVC_FrameBind = &CbAvcEncFrameBind;\n    iAvcHandle.CBAVC_FrameUnbind = &CbAvcEncFrameUnbind;\n    iAvcHandle.CBAVC_Free = &CbAvcEncFree;\n    iAvcHandle.CBAVC_Malloc = &CbAvcEncMalloc;\n    iAvcHandle.CBAVC_DebugLog = &CbAvcEncDebugLog;\n    iAvcHandle.userData = this;\n\n    iYUVIn = NULL;\n    iState = ECreated;\n    iFramePtr = NULL;\n    iDPB = NULL;\n    iFrameUsed = NULL;\n\n    return true;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::Initialize(TAVCEIInputFormat *aVidInFormat, TAVCEIEncodeParam *aEncParam)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Initialize\");\n\n    AVCEncParams aEncOption; /* encoding options */\n\n    iOverrunBuffer = NULL;\n    iOBSize = 0;\n\n    if (EAVCEI_SUCCESS != Init(aVidInFormat, aEncParam, aEncOption))\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Initialize (EAVCEI_SUCCESS != Init(aVidInFormat, aEncParam, aEncOption)) -> return EAVCEI_FAIL\");\n        
return EAVCEI_FAIL;\n    }\n\n\n    if (AVCENC_SUCCESS != PVAVCEncInitialize(&iAvcHandle, &aEncOption, NULL, NULL))\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Initialize (AVCENC_SUCCESS != PVAVCEncInitialize(&iAvcHandle, &aEncOption, NULL, NULL)) -> return EAVCEI_FAIL\");\n        return EAVCEI_FAIL;\n    }\n\n    iIDR = true;\n    iDispOrd = 0;\n    iState = EInitialized; // change state to initialized\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Initialize final return EAVCEI_SUCCESS\");\n    return EAVCEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nint32 PVAVCEncoder::GetMaxOutputBufferSize()\n{\n    int size = 0;\n\n    PVAVCEncGetMaxOutputBufferSize(&iAvcHandle, &size);\n\n    return size;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nTAVCEI_RETVAL PVAVCEncoder::Init(TAVCEIInputFormat* aVidInFormat, TAVCEIEncodeParam* aEncParam, AVCEncParams& aEncOption)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init\");\n\n    if (iState == EInitialized || iState == EEncoding)  /* clean up before re-initialized */\n    {\n\n        PVAVCCleanUpEncoder(&iAvcHandle);\n        if (iYUVIn)\n        {\n            oscl_free(iYUVIn);\n            iYUVIn = NULL;\n        }\n\n    }\n\n    iState = ECreated; // change state back to created\n\n    iId = aEncParam->iEncodeID;\n\n    iSrcWidth = aVidInFormat->iFrameWidth;\n    iSrcHeight = aVidInFormat->iFrameHeight;\n    iSrcFrameRate = aVidInFormat->iFrameRate;\n    iVideoFormat =  aVidInFormat->iVideoFormat;\n    iFrameOrientation = aVidInFormat->iFrameOrientation;\n\n    // allocate iYUVIn\n    if (iVideoFormat == EAVCEI_VDOFMT_YUV420SEMIPLANAR) /* Not multiple of 16 */\n    {\n        iYUVIn = (uint8*) oscl_malloc((iSrcWidth*iSrcHeight* 3)>>1);\n        if (iYUVIn == NULL)\n        {\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init (iYUVIn == NULL) -> return: 
EAVCEI_FAIL\");\n\n            return EAVCEI_FAIL;\n        }\n    }\n\n    // check the buffer delay according to the clip duration\n    if (aEncParam->iClipDuration > 0 && aEncParam->iRateControlType == EAVCEI_RC_VBR_1)\n    {\n        if (aEncParam->iBufferDelay > (float)(aEncParam->iClipDuration / 10000.0))   //enforce 10% variation of the clip duration as the bound of buffer delay\n        {\n            aEncParam->iBufferDelay = (float)(aEncParam->iClipDuration / 10000.0);\n        }\n    }\n\n    /* Check color format */\n    if ( (iVideoFormat != EAVCEI_VDOFMT_YUV420SEMIPLANAR))\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init ( (iVideoFormat != EAVCEI_VDOFMT_YUV420SEMIPLANAR)) -> return: EAVCEI_FAIL\");\n\n        return EAVCEI_FAIL;\n    }\n\n    if (aEncParam->iNumLayer > 1)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init (aEncParam->iNumLayer > 1) -> return: EAVCEI_FAIL\");\n\n        return EAVCEI_FAIL;\n    }\n\n    aEncOption.width = iEncWidth = aEncParam->iFrameWidth[0];\n    aEncOption.height = iEncHeight = aEncParam->iFrameHeight[0];\n\n    iEncFrameRate = aEncParam->iFrameRate[0];\n    aEncOption.frame_rate = (uint32)(1000 * iEncFrameRate);\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init aEncParam->iRateControlType = %d\", aEncParam->iRateControlType);\n    \n    if (aEncParam->iRateControlType == EAVCEI_RC_CONSTANT_Q)\n    {\n        aEncOption.rate_control = AVC_OFF;\n        aEncOption.bitrate = 64000; // default\n    }\n    else if (aEncParam->iRateControlType == EAVCEI_RC_CBR_1)\n    {\n        aEncOption.rate_control = AVC_ON;\n    }\n    else if (aEncParam->iRateControlType == EAVCEI_RC_VBR_1)\n    {\n        aEncOption.rate_control = AVC_ON;\n    }\n    else\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init (Unknown aEncParam->iRateControlType = %d) -> return: EAVCEI_FAIL\", aEncParam->iRateControlType);\n        return EAVCEI_FAIL;\n    }\n\n    // future :: 
map aEncParam->iEncMode to EncMode inside AVCEncoder\n\n    iPacketSize = aEncParam->iPacketSize;\n    aEncOption.profile = mapProfile(aEncParam->iProfile);\n    aEncOption.level = mapLevel(aEncParam->iLevel);\n\n    //aEncOption.src_interval = (int)(1000/aVidInFormat->iFrameRate + 0.5);\n\n    aEncOption.bitrate = aEncParam->iBitRate[0];\n    aEncOption.initQP = aEncParam->iIquant[0];\n\n    aEncOption.init_CBP_removal_delay = (uint32)(aEncParam->iBufferDelay * 1000); // make it millisecond\n    aEncOption.CPB_size = ((uint32)((uint32)aEncParam->iBufferDelay * (aEncOption.bitrate)));\n\n    switch (aEncParam->iIFrameInterval)\n    {\n        case -1:\n            aEncOption.idr_period = 0;  /* all P-frames */\n            break;\n        case 0:\n            aEncOption.idr_period = 1;  /* all IDR-frames */\n            break;\n        default:\n            aEncOption.idr_period = (int)(aEncParam->iIFrameInterval *  aVidInFormat->iFrameRate);\n            break;\n    }\n\n    aEncOption.intramb_refresh = aEncParam->iNumIntraMBRefresh;\n    aEncOption.auto_scd = (aEncParam->iSceneDetection == true) ? AVC_ON : AVC_OFF;\n    aEncOption.out_of_band_param_set = (aEncParam->iOutOfBandParamSet == true) ? 
AVC_ON : AVC_OFF;\n    aEncOption.use_overrun_buffer = AVC_OFF; // hardcode it to off\n\n    /* default values */\n    aEncOption.poc_type = 0;\n    aEncOption.num_ref_frame = 1;\n\n    aEncOption.log2_max_poc_lsb_minus_4 = 12;\n    aEncOption.num_slice_group = 1;\n    aEncOption.fmo_type = 0; /// FMO is disabled for now.\n    aEncOption.db_filter = AVC_ON;\n    aEncOption.disable_db_idc = 0;\n    aEncOption.alpha_offset = 0;\n    aEncOption.beta_offset = 0;\n    aEncOption.constrained_intra_pred = AVC_OFF;\n\n    aEncOption.data_par = AVC_OFF;\n    aEncOption.fullsearch = AVC_OFF;\n    aEncOption.search_range = 16;\n    aEncOption.sub_pel = AVC_ON;\n    aEncOption.submb_pred = AVC_OFF;\n    aEncOption.rdopt_mode = AVC_OFF;\n    aEncOption.bidir_pred = AVC_OFF;\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Init final return: EAVCEI_SUCCESS\");\n\n    return EAVCEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::GetParameterSet(uint8 *paramSet, int32 *size, int *aNALType)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet\");\n\n    uint aSize;\n    AVCEnc_Status avcStatus ;\n\n    if (iState != EInitialized) {/* has to be initialized first */\n        \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet return: EAVCEI_FAIL\");\n        \n        return EAVCEI_FAIL;\n    }\n    aSize = *size;\n\n    if (paramSet == NULL || size == NULL)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet return: EAVCEI_INPUT_ERROR\");\n\n        return EAVCEI_INPUT_ERROR;\n    }\n\n    //=================>\n    avcStatus = PVAVCEncodeNAL(&iAvcHandle, paramSet, &aSize, aNALType);\n\n    if (avcStatus == AVCENC_WRONG_STATE)\n    {\n        *size = 0;\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet return: AVCENC_WRONG_STATE-> EAVCEI_FAIL\");\n        return EAVCEI_FAIL;\n    }\n\n 
   switch (*aNALType)\n    {\n        case AVC_NALTYPE_SPS:\n        case AVC_NALTYPE_PPS:\n            *size = aSize;\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet return: SPS/PPS-> EAVCEI_SUCCESS\");\n\n            return EAVCEI_SUCCESS;\n        default:\n            *size = 0;\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetParameterSet return: default-> EAVCEI_FAIL\");\n\n            return EAVCEI_FAIL;\n    }\n\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::Encode(TAVCEIInputData *aVidIn)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode\");\n\n    AVCEnc_Status status;\n\n    if ((aVidIn == NULL) || (aVidIn->iSource == NULL))\n    {\n        return EAVCEI_INPUT_ERROR;\n    }\n    // we need to check the timestamp here. If it's before the proper time,\n    // we need to return EAVCEI_FRAME_DROP here.\n    // also check whether encoder is ready to take a new frame.\n    if (iState == EEncoding)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode (iState == EEncoding) -> return: EAVCEI_NOT_READY\");\n\n        return EAVCEI_NOT_READY;\n    }\n    else if (iState == ECreated)\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode (iState == ECreated) -> return: EAVCEI_FAIL\");\n\n        return EAVCEI_FAIL;\n    }\n\n    if (iVideoFormat == EAVCEI_VDOFMT_YUV420SEMIPLANAR)\n\n    {\n        if (iYUVIn) /* iSrcWidth is not multiple of 4 or iSrcHeight is odd number */\n        {\n            CopyToYUVIn(aVidIn->iSource,iSrcWidth,iSrcHeight);\n            iVideoIn = iYUVIn;\n        }\n        else /* otherwise, we can just use aVidIn->iSource */\n        {\n            iVideoIn = aVidIn->iSource;  //   Sept 14, 2005 */\n        }\n    } else {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode (iVideoFormat != 
EAVCEI_VDOFMT_YUV420SEMIPLANAR) -> return: EAVCEI_INPUT_ERROR\");\n\n    \treturn EAVCEI_INPUT_ERROR;\n    }\n\n    /* assign with backward-P or B-Vop this timestamp must be re-ordered */\n    iTimeStamp = aVidIn->iTimeStamp;\n\n    iVidIn.height = ((iSrcHeight + 15) >> 4) << 4;\n    iVidIn.pitch = ((iSrcWidth + 15) >> 4) << 4;\n    iVidIn.coding_timestamp = iTimeStamp;\n    iVidIn.YCbCr[0] = (uint8*)iVideoIn;\n    iVidIn.YCbCr[1] = (uint8*)(iVideoIn + iVidIn.height * iVidIn.pitch);\n    iVidIn.YCbCr[2] = iVidIn.YCbCr[1] + ((iVidIn.height * iVidIn.pitch) >> 2);\n    iVidIn.disp_order = iDispOrd;\n\n    //================>\n    status = PVAVCEncSetInput(&iAvcHandle, &iVidIn);\n\n    switch (status)\n    {\n        case AVCENC_SKIPPED_PICTURE:\n        \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode AVCENC_SKIPPED_PICTURE-> return: EAVCEI_FRAME_DROP\");\n\n            return EAVCEI_FRAME_DROP;\n        case AVCENC_FAIL: // not in the right state\n        \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode AVCENC_FAIL-> return: EAVCEI_NOT_READY\");\n\n            return EAVCEI_NOT_READY;\n        case AVCENC_SUCCESS:\n            iState = EEncoding;\n            iDispOrd++;\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode AVCENC_SUCCESS-> return: EAVCEI_SUCCESS\");\n\n            return EAVCEI_SUCCESS;\n        case AVCENC_NEW_IDR:\n            iState = EEncoding;\n            iDispOrd++;\n            iIDR = true;\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode AVCENC_NEW_IDR-> return: EAVCEI_SUCCESS\");\n\n            return EAVCEI_SUCCESS;\n        default:\n            \n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"Encode return: default->EAVCEI_FAIL\");\n\n            return EAVCEI_FAIL;\n    }\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL 
PVAVCEncoder::GetOutput(TAVCEIOutputData *aVidOut, int *aRemainingBytes)\n{\n\n    __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput\");\n\n    AVCEnc_Status status;\n    TAVCEI_RETVAL ret;\n    uint Size;\n    int nalType;\n    AVCFrameIO recon;\n    *aRemainingBytes = 0;\n\n    if (iState != EEncoding)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput return: EAVCEI_NOT_READY\");\n\n        return EAVCEI_NOT_READY;\n    }\n\n    if (aVidOut == NULL)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput return: EAVCEI_INPUT_ERROR\");\n\n        return EAVCEI_INPUT_ERROR;\n    }\n\n\n    if (iOverrunBuffer) // more output buffer to be copied out.\n    {\n        aVidOut->iFragment = true;\n        aVidOut->iTimeStamp = iTimeStamp;\n        aVidOut->iKeyFrame = iIDR;\n        aVidOut->iLastNAL = (iEncStatus == AVCENC_PICTURE_READY) ? true : false;\n\n        if (iOBSize > aVidOut->iBitstreamSize)\n        {\n            oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, aVidOut->iBitstreamSize);\n            iOBSize -= aVidOut->iBitstreamSize;\n            iOverrunBuffer += aVidOut->iBitstreamSize;\n            aVidOut->iLastFragment = false;\n            *aRemainingBytes = iOBSize;\n\n            __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput return: (iLastFragment = false) EAVCEI_MORE_DATA\");\n\n            return EAVCEI_MORE_DATA;\n        }\n        else\n        {\n            oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, iOBSize);\n            aVidOut->iBitstreamSize = iOBSize;\n            iOverrunBuffer = NULL;\n            iOBSize = 0;\n            aVidOut->iLastFragment = true;\n            *aRemainingBytes = 0;\n\n            if (iEncStatus == AVCENC_PICTURE_READY)\n            {\n                iState = EInitialized;\n                if (iIDR == true)\n                {\n                    iIDR = false;\n                }\n                __android_log_print(ANDROID_LOG_INFO, LOG_TAG, 
 \"GetOuput return: (iLastFragment = true) EAVCEI_SUCCESS\");\n\n                return EAVCEI_SUCCESS;\n            }\n            else\n            {\n                __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput return: (iLastFragment = true) EAVCEI_MORE_DATA\");\n\n                return EAVCEI_MORE_NAL;\n            }\n        }\n    }\n\n    // Otherwise, call library to encode another NAL\n\n    Size = aVidOut->iBitstreamSize;\n\n    // ==============>\n    iEncStatus = PVAVCEncodeNAL(&iAvcHandle, (uint8*)aVidOut->iBitstream, &Size, &nalType);\n\n    if (iEncStatus == AVCENC_SUCCESS)\n    {\n    \n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput (iEncStatus == AVCENC_SUCCESS) -> return EAVCEI_MORE_NAL\");\n\n        aVidOut->iLastNAL = false;\n        aVidOut->iKeyFrame = iIDR;\n        ret = EAVCEI_MORE_NAL;\n    }\n    else if (iEncStatus == AVCENC_PICTURE_READY)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput (iEncStatus == AVCENC_PICTURE_READY) -> return EAVCEI_SUCCESS\");\n\n        aVidOut->iLastNAL = true;\n        aVidOut->iKeyFrame = iIDR;\n        ret = EAVCEI_SUCCESS;\n        iState = EInitialized;\n\n        status = PVAVCEncGetRecon(&iAvcHandle, &recon);\n        if (status == AVCENC_SUCCESS)\n        {\n            aVidOut->iFrame = recon.YCbCr[0];\n\n            PVAVCEncReleaseRecon(&iAvcHandle, &recon);\n        }\n    }\n    else if (iEncStatus == AVCENC_SKIPPED_PICTURE)\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput iEncStatus == AVCENC_SKIPPED_PICTURE\");\n\n        aVidOut->iLastFragment = true;\n        aVidOut->iFragment = false;\n        aVidOut->iBitstreamSize = 0;\n        aVidOut->iTimeStamp = iTimeStamp;\n        iState = EInitialized;\n        return EAVCEI_FRAME_DROP;\n    }\n    else\n    {\n        __android_log_print(ANDROID_LOG_INFO, LOG_TAG,  \"GetOuput iEncStatus else\");\n\n        return EAVCEI_FAIL;\n    }\n\n    iOverrunBuffer = 
PVAVCEncGetOverrunBuffer(&iAvcHandle);\n\n    if (iOverrunBuffer) // OB is used\n    {\n        if (Size < (uint)aVidOut->iBitstreamSize) // encoder decides to use OB even though the buffer is big enough\n        {\n            oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, Size);\n            iOverrunBuffer = NULL; // reset it\n            iOBSize = 0;\n        }\n        else\n        {\n            oscl_memcpy(aVidOut->iBitstream, iOverrunBuffer, aVidOut->iBitstreamSize);\n            iOBSize = Size - aVidOut->iBitstreamSize;\n            iOverrunBuffer += aVidOut->iBitstreamSize;\n            if (iOBSize > 0) // there are more data\n            {\n                iState = EEncoding; // still encoding..\n                aVidOut->iLastFragment = false;\n                aVidOut->iFragment = true;\n                aVidOut->iTimeStamp = iTimeStamp;\n                return EAVCEI_MORE_DATA; // only copy out from iOverrunBuffer next time.\n            }\n        }\n\n    }\n\n    aVidOut->iLastFragment = true; /* for now */\n    aVidOut->iFragment = false;  /* for now */\n    aVidOut->iBitstreamSize = Size;\n    aVidOut->iTimeStamp = iTimeStamp;\n\n    if (iEncStatus == AVCENC_PICTURE_READY && iIDR == true)\n    {\n        iIDR = false;\n    }\n\n    return ret;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::FlushInput()\n{\n    // do nothing for now.\n    return EAVCEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nTAVCEI_RETVAL PVAVCEncoder::CleanupEncoder()\n{\n    if (iState == EInitialized || iState == EEncoding)\n    {\n        PVAVCCleanUpEncoder(&iAvcHandle);\n        iState = ECreated;\n\n        if (iYUVIn)\n        {\n            oscl_free(iYUVIn);\n            iYUVIn = NULL;\n        }\n    }\n    if (iFrameUsed)\n    {\n        oscl_free(iFrameUsed);\n        iFrameUsed = NULL;\n    }\n    if (iDPB)\n    {\n        
oscl_free(iDPB);\n        iDPB = NULL;\n    }\n    if (iFramePtr)\n    {\n        oscl_free(iFramePtr);\n        iFramePtr = NULL;\n    }\n    return EAVCEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateBitRate(int32 *aBitRate)\n{\n    if (PVAVCEncUpdateBitRate(&iAvcHandle, aBitRate[0]) == AVCENC_SUCCESS)\n        return EAVCEI_SUCCESS;\n    else\n        return EAVCEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateFrameRate(OsclFloat *aFrameRate)\n{\n    if (PVAVCEncUpdateFrameRate(&iAvcHandle, (uint32)(1000*aFrameRate[0]), 1000) == AVCENC_SUCCESS)\n        return EAVCEI_SUCCESS;\n    else\n        return EAVCEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::UpdateIDRFrameInterval(int32 aIDRFrameInterval)\n{\n    if (PVAVCEncUpdateIDRInterval(&iAvcHandle, aIDRFrameInterval) == AVCENC_SUCCESS)\n        return EAVCEI_SUCCESS;\n    else\n        return EAVCEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TAVCEI_RETVAL PVAVCEncoder::IDRRequest()\n{\n    if (PVAVCEncIDRRequest(&iAvcHandle) == AVCENC_SUCCESS)\n        return EAVCEI_SUCCESS;\n    else\n        return EAVCEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF int32 PVAVCEncoder::GetEncodeWidth(int32 aLayer)\n{\n    OSCL_UNUSED_ARG(aLayer);\n    return iEncWidth;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF int32 PVAVCEncoder::GetEncodeHeight(int32 aLayer)\n{\n    OSCL_UNUSED_ARG(aLayer);\n    return iEncHeight;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF OsclFloat 
PVAVCEncoder::GetEncodeFrameRate(int32 aLayer)\n{\n    OSCL_UNUSED_ARG(aLayer);\n    return iEncFrameRate;\n}\n\n\n\n/* ///////////////////////////////////////////////////////////////////////// */\n/* Copy from YUV input to YUV frame inside M4VEnc lib                       */\n/* When input is not YUV, the color conv will write it directly to iVideoInOut. */\n/* ///////////////////////////////////////////////////////////////////////// */\n\nvoid PVAVCEncoder::CopyToYUVIn(uint8 *YUV, int width, int height)\n{\n\t// Save YUV pointer\n\tuint8* saveiYUVIn = iYUVIn;\n\n\t// Convert YUV input to have distinct Y U and V channels\n\t// Copy Y data\n\tfor (int i=0;i<height;i++){\n\t  for (int j=0;j<width;j++){\n\t\t  *iYUVIn= *YUV;\n\t\t  iYUVIn++;\n\t\t  YUV++;\n\t  }\n\t}\n\n\t// Copy UV data\n\tuint8 *uPos = iYUVIn;\n\tuint8 *vPos = uPos + ((width*height)>>2);\n\tuint16 temp = 0;\n\tuint16* iVideoPtr = (uint16*)YUV;\n\tfor (int i=0;i<(height>>1);i++){\n\t\tfor (int j=0;j<(width>>1);j++){\n\t\t\ttemp = *iVideoPtr++; // U1V1\n\t\t\t*vPos++= (uint8)(temp & 0xFF);\n\t\t\t*uPos++= (uint8)((temp >> 8) & 0xFF);\n\t\t}\n\t}\n\n\t// Restore pointer\n\tiYUVIn = saveiYUVIn;\n\n    return ;\n}\n\nAVCProfile PVAVCEncoder::mapProfile(TAVCEIProfile in)\n{\n    AVCProfile out;\n\n    switch (in)\n    {\n        case EAVCEI_PROFILE_DEFAULT:\n        case EAVCEI_PROFILE_BASELINE:\n            out = AVC_BASELINE;\n            break;\n        case EAVCEI_PROFILE_MAIN:\n            out = AVC_MAIN;\n            break;\n        case EAVCEI_PROFILE_EXTENDED:\n            out = AVC_EXTENDED;\n            break;\n        case EAVCEI_PROFILE_HIGH:\n            out = AVC_HIGH;\n            break;\n        case EAVCEI_PROFILE_HIGH10:\n            out = AVC_HIGH10;\n            break;\n        case EAVCEI_PROFILE_HIGH422:\n            out = AVC_HIGH422;\n            break;\n        case EAVCEI_PROFILE_HIGH444:\n            out = AVC_HIGH444;\n            break;\n        default:\n            out = 
AVC_BASELINE;\n            break;\n    }\n\n    return out;\n}\n\nAVCLevel PVAVCEncoder::mapLevel(TAVCEILevel in)\n{\n    AVCLevel out;\n\n    switch (in)\n    {\n        case EAVCEI_LEVEL_AUTODETECT:\n            out = AVC_LEVEL_AUTO;\n            break;\n        case EAVCEI_LEVEL_1:\n            out = AVC_LEVEL1;\n            break;\n        case EAVCEI_LEVEL_1B:\n            out = AVC_LEVEL1_B;\n            break;\n        case EAVCEI_LEVEL_11:\n            out = AVC_LEVEL1_1;\n            break;\n        case EAVCEI_LEVEL_12:\n            out = AVC_LEVEL1_2;\n            break;\n        case EAVCEI_LEVEL_13:\n            out = AVC_LEVEL1_3;\n            break;\n        case EAVCEI_LEVEL_2:\n            out = AVC_LEVEL2;\n            break;\n        case EAVCEI_LEVEL_21:\n            out = AVC_LEVEL2_1;\n            break;\n        case EAVCEI_LEVEL_22:\n            out = AVC_LEVEL2_2;\n            break;\n        case EAVCEI_LEVEL_3:\n            out = AVC_LEVEL3;\n            break;\n        case EAVCEI_LEVEL_31:\n            out = AVC_LEVEL3_1;\n            break;\n        case EAVCEI_LEVEL_32:\n            out = AVC_LEVEL3_2;\n            break;\n        case EAVCEI_LEVEL_4:\n            out = AVC_LEVEL4;\n            break;\n        case EAVCEI_LEVEL_41:\n            out = AVC_LEVEL4_1;\n            break;\n        case EAVCEI_LEVEL_42:\n            out = AVC_LEVEL4_2;\n            break;\n        case EAVCEI_LEVEL_5:\n            out = AVC_LEVEL5;\n            break;\n        case EAVCEI_LEVEL_51:\n            out = AVC_LEVEL5_1;\n            break;\n        default:\n            out = AVC_LEVEL5_1;\n            break;\n    }\n\n    return out;\n}\n\n\n/* ///////////////////////////////////////////////////////////////////////// */\n\nint PVAVCEncoder::AVC_DPBAlloc(uint frame_size_in_mbs, uint num_buffers)\n{\n    int ii;\n    uint frame_size = (frame_size_in_mbs << 8) + (frame_size_in_mbs << 7);\n\n    if (iDPB) oscl_free(iDPB); // free previous one 
first\n\n    iDPB = (uint8*) oscl_malloc(sizeof(uint8) * frame_size * num_buffers);\n    if (iDPB == NULL)\n    {\n        return 0;\n    }\n\n    iNumFrames = num_buffers;\n\n    if (iFrameUsed) oscl_free(iFrameUsed); // free previous one\n\n    iFrameUsed = (bool*) oscl_malloc(sizeof(bool) * num_buffers);\n    if (iFrameUsed == NULL)\n    {\n        return 0;\n    }\n\n    if (iFramePtr) oscl_free(iFramePtr); // free previous one\n    iFramePtr = (uint8**) oscl_malloc(sizeof(uint8*) * num_buffers);\n    if (iFramePtr == NULL)\n    {\n        return 0;\n    }\n\n    iFramePtr[0] = iDPB;\n    iFrameUsed[0] = false;\n\n    for (ii = 1; ii < (int)num_buffers; ii++)\n    {\n        iFrameUsed[ii] = false;\n        iFramePtr[ii] = iFramePtr[ii-1] + frame_size;\n    }\n\n    return 1;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nvoid PVAVCEncoder::AVC_FrameUnbind(int indx)\n{\n    if (indx < iNumFrames)\n    {\n        iFrameUsed[indx] = false;\n    }\n\n    return ;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nint PVAVCEncoder::AVC_FrameBind(int indx, uint8** yuv)\n{\n    if ((iFrameUsed[indx] == true) || (indx >= iNumFrames))\n    {\n        return 0; // already in used\n    }\n\n    iFrameUsed[indx] = true;\n    *yuv = iFramePtr[indx];\n\n    return 1;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/pvavcencoder_factory.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\n * @file pvavcencoder_factory.cpp\n * @brief Singleton factory for PVAVCEncoder\n */\n\n#include \"oscl_base.h\"\n\n#include \"pvavcencoder.h\"\n#include \"pvavcencoder_factory.h\"\n\n#include \"oscl_error_codes.h\"\n#include \"oscl_exception.h\"\n\n// Use default DLL entry point\n#include \"oscl_dll.h\"\n\nOSCL_DLL_ENTRY_POINT_DEFAULT()\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF PVAVCEncoderInterface* PVAVCEncoderFactory::CreatePVAVCEncoder()\n{\n    PVAVCEncoderInterface* videoenc = NULL;\n    videoenc = PVAVCEncoder::New();\n    if (videoenc == NULL)\n    {\n        OSCL_LEAVE(OsclErrNoMemory);\n    }\n    return videoenc;\n}\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF bool PVAVCEncoderFactory::DeletePVAVCEncoder(PVAVCEncoderInterface* aVideoEnc)\n{\n    if (aVideoEnc)\n    {\n        delete aVideoEnc;\n        return true;\n    }\n\n    return false;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/rate_control.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"oscl_base_macros.h\"\n#include \"oscl_math.h\"\n#include \"oscl_string.h\"\n\n/* rate control variables */\n#define RC_MAX_QUANT 51\n#define RC_MIN_QUANT 0   //cap to 10 to prevent rate fluctuation    \n\n#define MAD_MIN 1 /* handle the case of devision by zero in RC */\n\n\n/* local functions */\ndouble QP2Qstep(int QP);\nint Qstep2QP(double Qstep);\n\ndouble ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl);\n\nvoid targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP);\n\nvoid calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,\n                                  AVCRateControl *rateCtrl, MultiPass *pMP);\n\nvoid updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP);\n\nvoid AVCSaveRDSamples(MultiPass *pMP, int counter_samples);\n\nvoid updateRateControl(AVCRateControl *rateControl, int nal_type);\n\nint GetAvgFrameQP(AVCRateControl *rateCtrl)\n{\n    return rateCtrl->Qc;\n}\n\nAVCEnc_Status RCDetermineFrameNum(AVCEncObject *encvid, AVCRateControl *rateCtrl, uint32 modTime, uint *frameNum)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCSliceHeader *sliceHdr = 
video->sliceHdr;\n    uint32 modTimeRef = encvid->modTimeRef;\n    int32  currFrameNum ;\n    int  frameInc;\n\n\n    /* check with the buffer fullness to make sure that we have enough bits to encode this frame */\n    /* we can use a threshold to guarantee minimum picture quality */\n    /**********************************/\n\n    /* for now, the default is to encode every frame, To Be Changed */\n    if (rateCtrl->first_frame)\n    {\n        encvid->modTimeRef = modTime;\n        encvid->wrapModTime = 0;\n        encvid->prevFrameNum = 0;\n        encvid->prevProcFrameNum = 0;\n\n        *frameNum = 0;\n\n        /* set frame type to IDR-frame */\n        video->nal_unit_type = AVC_NALTYPE_IDR;\n        sliceHdr->slice_type = AVC_I_ALL_SLICE;\n        video->slice_type = AVC_I_SLICE;\n\n        return AVCENC_SUCCESS;\n    }\n    else\n    {\n        if (modTime < modTimeRef) /* modTime wrapped around */\n        {\n            encvid->wrapModTime += ((uint32)0xFFFFFFFF - modTimeRef) + 1;\n            encvid->modTimeRef = modTimeRef = 0;\n        }\n        modTime += encvid->wrapModTime; /* wrapModTime is non zero after wrap-around */\n\n        /* Calculate frame number based on frame rate starting from modTimeRef */\n        /* Note, this value is totally independent from sliceHdr->frame_num or video->CurrPicNum */\n        currFrameNum = (int32)(((modTime - modTimeRef) * rateCtrl->frame_rate + 200) / 1000); /* add small roundings */\n\n        if (currFrameNum <= (int32)encvid->prevProcFrameNum)\n        {\n            return AVCENC_FAIL;  /* this is a late frame do not encode it */\n        }\n\n        frameInc = currFrameNum - encvid->prevProcFrameNum;\n\n        /* Check how many frames have been skipped since the last processed frame */\n        if (frameInc < rateCtrl->skip_next_frame + 1)\n        {\n            return AVCENC_FAIL;  /* frame skip required to maintain the target bit rate. 
*/\n        }\n\n        RCUpdateBuffer(video, rateCtrl, frameInc - rateCtrl->skip_next_frame);  /* in case more frames dropped */\n\n        /* This part would be similar to DetermineVopType of m4venc */\n\n        if ((currFrameNum >= (int32)rateCtrl->idrPeriod && rateCtrl->idrPeriod > 0) /* exceed IDR period */\n                || (currFrameNum >= (int32)video->MaxFrameNum)) /* this line for all P-frames (idrPeriod=0) */\n        {\n            /* Re-assign modTimeRef to the new IDR frame */\n            encvid->modTimeRef += (uint32)(currFrameNum * 1000 / rateCtrl->frame_rate);\n\n            /* Set frame type to IDR-frame */\n            video->nal_unit_type = AVC_NALTYPE_IDR;\n            sliceHdr->slice_type = AVC_I_ALL_SLICE;\n            video->slice_type = AVC_I_SLICE;\n\n            encvid->prevProcFrameNum = *frameNum = 0; // Reset frameNum to zero.\n        }\n        else\n        {\n            video->nal_unit_type = AVC_NALTYPE_SLICE;\n            sliceHdr->slice_type = AVC_P_ALL_SLICE;\n            video->slice_type = AVC_P_SLICE;\n            encvid->prevProcFrameNum = currFrameNum;\n            *frameNum = currFrameNum;\n        }\n\n    }\n\n    return AVCENC_SUCCESS;\n}\n\nvoid RCUpdateBuffer(AVCCommonObj *video, AVCRateControl *rateCtrl, int frameInc)\n{\n    int tmp;\n    MultiPass *pMP = rateCtrl->pMP;\n\n    OSCL_UNUSED_ARG(video);\n\n    if (rateCtrl->rcEnable == TRUE)\n    {\n        if (frameInc > 1)\n        {\n            tmp = rateCtrl->bitsPerFrame * (frameInc - 1);\n            rateCtrl->VBV_fullness -= tmp;\n            pMP->counter_BTsrc += 10 * (frameInc - 1);\n\n            /* Check buffer underflow */\n            if (rateCtrl->VBV_fullness < rateCtrl->low_bound)\n            {\n                rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;\n                rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;\n                pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs 
/ 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n            }\n        }\n    }\n}\n\n\nAVCEnc_Status InitRateControlModule(AVCHandle *avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    double L1, L2, L3, bpp;\n    int qp;\n    int i, j;\n\n    rateCtrl->basicUnit = video->PicSizeInMbs;\n\n    rateCtrl->MADofMB = (double*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData,\n                        video->PicSizeInMbs * sizeof(double), DEFAULT_ATTR);\n\n    if (!rateCtrl->MADofMB)\n    {\n        goto CLEANUP_RC;\n    }\n\n    if (rateCtrl->rcEnable == TRUE)\n    {\n        rateCtrl->pMP = (MultiPass*) avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, sizeof(MultiPass), DEFAULT_ATTR);\n        if (!rateCtrl->pMP)\n        {\n            goto CLEANUP_RC;\n        }\n        oscl_memset(rateCtrl->pMP, 0, sizeof(MultiPass));\n        rateCtrl->pMP->encoded_frames = -1; /* forget about the very first I frame */\n\n        /* RDInfo **pRDSamples */\n        rateCtrl->pMP->pRDSamples = (RDInfo **)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (30 * sizeof(RDInfo *)), DEFAULT_ATTR);\n        if (!rateCtrl->pMP->pRDSamples)\n        {\n            goto CLEANUP_RC;\n        }\n\n        for (i = 0; i < 30; i++)\n        {\n            rateCtrl->pMP->pRDSamples[i] = (RDInfo *)avcHandle->CBAVC_Malloc(encvid->avcHandle->userData, (32 * sizeof(RDInfo)), DEFAULT_ATTR);\n            if (!rateCtrl->pMP->pRDSamples[i])\n            {\n                goto CLEANUP_RC;\n            }\n            for (j = 0; j < 32; j++)    oscl_memset(&(rateCtrl->pMP->pRDSamples[i][j]), 0, sizeof(RDInfo));\n        }\n        rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/\n        rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5);\n        rateCtrl->pMP->frameRange = 
AVC_MIN(rateCtrl->pMP->frameRange, 30);\n\n        rateCtrl->pMP->framePos = -1;\n\n\n        rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate);\n\n        /* BX rate control */\n        rateCtrl->skip_next_frame = 0; /* must be initialized */\n\n        rateCtrl->Bs = rateCtrl->cpbSize;\n        rateCtrl->TMN_W = 0;\n        rateCtrl->VBV_fullness = (int)(rateCtrl->Bs * 0.5); /* rateCtrl->Bs */\n        rateCtrl->encoded_frames = 0;\n\n        rateCtrl->TMN_TH = rateCtrl->bitsPerFrame;\n\n        rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5;\n        if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5;\n\n        // Set the initial buffer fullness\n        /* According to the spec, the initial buffer fullness needs to be set to 1/3 */\n        rateCtrl->VBV_fullness = (int)(rateCtrl->Bs / 3.0 - rateCtrl->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */\n        rateCtrl->pMP->counter_BTsrc = (int)((rateCtrl->Bs / 2.0 - rateCtrl->Bs / 3.0) / (rateCtrl->bitsPerFrame / 10.0));\n        rateCtrl->TMN_W = (int)(rateCtrl->VBV_fullness + rateCtrl->pMP->counter_BTsrc * (rateCtrl->bitsPerFrame / 10.0));\n\n        rateCtrl->low_bound = -rateCtrl->Bs / 2;\n        rateCtrl->VBV_fullness_offset = 0;\n\n        /* Setting the bitrate and framerate */\n        rateCtrl->pMP->bitrate = rateCtrl->bitRate;\n        rateCtrl->pMP->framerate = rateCtrl->frame_rate;\n        rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate;\n\n        /*compute the initial QP*/\n        bpp = 1.0 * rateCtrl->bitRate / (rateCtrl->frame_rate * (video->PicSizeInMbs << 8));\n        if (video->PicWidthInSamplesL == 176)\n        {\n            L1 = 0.1;\n            L2 = 0.3;\n            L3 = 0.6;\n        }\n        else if (video->PicWidthInSamplesL == 352)\n        {\n            L1 = 0.2;\n            L2 = 0.6;\n            L3 = 
1.2;\n        }\n        else\n        {\n            L1 = 0.6;\n            L2 = 1.4;\n            L3 = 2.4;\n        }\n\n        if (rateCtrl->initQP == 0)\n        {\n            if (bpp <= L1)\n                qp = 35;\n            else if (bpp <= L2)\n                qp = 25;\n            else if (bpp <= L3)\n                qp = 20;\n            else\n                qp = 15;\n            rateCtrl->initQP = qp;\n        }\n\n        rateCtrl->Qc = rateCtrl->initQP;\n    }\n\n    return AVCENC_SUCCESS;\n\nCLEANUP_RC:\n\n    CleanupRateControlModule(avcHandle);\n    return AVCENC_MEMORY_FAIL;\n\n}\n\n\nvoid CleanupRateControlModule(AVCHandle *avcHandle)\n{\n    AVCEncObject *encvid = (AVCEncObject*) avcHandle->AVCObject;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    int i;\n\n    if (rateCtrl->MADofMB)\n    {\n        avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->MADofMB));\n    }\n\n    if (rateCtrl->pMP)\n    {\n        if (rateCtrl->pMP->pRDSamples)\n        {\n            for (i = 0; i < 30; i++)\n            {\n                if (rateCtrl->pMP->pRDSamples[i])\n                {\n                    avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples[i]);\n                }\n            }\n            avcHandle->CBAVC_Free(avcHandle->userData, (int)rateCtrl->pMP->pRDSamples);\n        }\n        avcHandle->CBAVC_Free(avcHandle->userData, (int)(rateCtrl->pMP));\n    }\n\n    return ;\n}\n\nvoid RCInitGOP(AVCEncObject *encvid)\n{\n    /* in BX RC, there's no GOP-level RC */\n\n    OSCL_UNUSED_ARG(encvid);\n\n    return ;\n}\n\n\nvoid RCInitFrameQP(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCPicParamSet *picParam = video->currPicParams;\n    MultiPass *pMP = rateCtrl->pMP;\n\n    if (rateCtrl->rcEnable == TRUE)\n    {\n        /* frame layer rate control */\n        if (rateCtrl->encoded_frames == 0)\n        {\n            video->QPy 
= rateCtrl->Qc = rateCtrl->initQP;\n        }\n        else\n        {\n            calculateQuantizer_Multipass(encvid, video, rateCtrl, pMP);\n            video->QPy = rateCtrl->Qc;\n        }\n\n        rateCtrl->NumberofHeaderBits = 0;\n        rateCtrl->NumberofTextureBits = 0;\n        rateCtrl->numFrameBits = 0; // reset\n\n        /* update pMP->framePos */\n        if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;\n\n        if (rateCtrl->T == 0)\n        {\n            pMP->counter_BTdst = (int)(rateCtrl->frame_rate * 7.5 + 0.5); /* 0.75s time frame */\n            pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, (int)(rateCtrl->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */\n            pMP->counter_BTdst = AVC_MAX(pMP->counter_BTdst, (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.30 / (rateCtrl->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */\n            pMP->counter_BTdst = AVC_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */\n\n            pMP->target_bits = rateCtrl->T = rateCtrl->TMN_TH = (int)(rateCtrl->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));\n            pMP->diff_counter = pMP->counter_BTdst;\n        }\n\n        /* collect the necessary data: target bits, actual bits, mad and QP */\n        pMP->target_bits = rateCtrl->T;\n        pMP->QP  = video->QPy;\n\n        pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);\n        if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n\n        pMP->bitrate = rateCtrl->bitRate; /* calculated in RCVopQPSetting */\n        pMP->framerate = rateCtrl->frame_rate;\n\n        /* first pass encoding */\n        pMP->nRe_Quantized = 0;\n\n    } // rcEnable\n    else\n    {\n        video->QPy = rateCtrl->initQP;\n    }\n\n//  printf(\" %d \",video->QPy);\n\n    if (video->CurrPicNum == 0 && 
encvid->outOfBandParamSet == FALSE)\n    {\n        picParam->pic_init_qs_minus26 = 0;\n        picParam->pic_init_qp_minus26 = video->QPy - 26;\n    }\n\n    // need this for motion estimation\n    encvid->lambda_mode = QP2QUANT[AVC_MAX(0, video->QPy-SHIFT_QP)];\n    encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);\n    return ;\n}\n\n/* Mad based variable bit allocation + QP calculation with a new quadratic method */\nvoid calculateQuantizer_Multipass(AVCEncObject *encvid, AVCCommonObj *video,\n                                  AVCRateControl *rateCtrl, MultiPass *pMP)\n{\n    int prev_actual_bits = 0, curr_target, /*pos=0,*/i, j;\n    OsclFloat Qstep, prev_QP = 0.625;\n\n    OsclFloat curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;\n\n    /* Mad based variable bit allocation */\n    targetBitCalculation(encvid, video, rateCtrl, pMP);\n\n    if (rateCtrl->T <= 0 || rateCtrl->totalSAD == 0)\n    {\n        if (rateCtrl->T < 0)    rateCtrl->Qc = RC_MAX_QUANT;\n        return;\n    }\n\n    /* ---------------------------------------------------------------------------------------------------*/\n    /* current frame QP estimation */\n    curr_target = rateCtrl->T;\n    curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;\n    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n    curr_RD  = (OsclFloat)curr_target / curr_mad;\n\n    if (rateCtrl->skip_next_frame == -1) // previous was skipped\n    {\n        i = pMP->framePos;\n        prev_mad = pMP->pRDSamples[i][0].mad;\n        prev_QP = pMP->pRDSamples[i][0].QP;\n        prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;\n    }\n    else\n    {\n        /* Another version of search the optimal point */\n        prev_mad = 0.0;\n        i = 0;\n        while (i < pMP->frameRange && prev_mad < 0.001) /* find first one with nonzero prev_mad */\n        {\n            prev_mad = pMP->pRDSamples[i][0].mad;\n            i++;\n        }\n\n        
if (i < pMP->frameRange)\n        {\n            prev_actual_bits = pMP->pRDSamples[i-1][0].actual_bits;\n\n            for (j = 0; i < pMP->frameRange; i++)\n            {\n                if (pMP->pRDSamples[i][0].mad != 0 &&\n                        AVC_ABS(prev_mad - curr_mad) > AVC_ABS(pMP->pRDSamples[i][0].mad - curr_mad))\n                {\n                    prev_mad = pMP->pRDSamples[i][0].mad;\n                    prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;\n                    j = i;\n                }\n            }\n            prev_QP = QP2Qstep(pMP->pRDSamples[j][0].QP);\n\n            for (i = 1; i < pMP->samplesPerFrame[j]; i++)\n            {\n                if (AVC_ABS(prev_actual_bits - curr_target) > AVC_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))\n                {\n                    prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;\n                    prev_QP = QP2Qstep(pMP->pRDSamples[j][i].QP);\n                }\n            }\n        }\n    }\n\n    // quadratic approximation\n    if (prev_mad > 0.001) // only when prev_mad is greater than 0, otherwise keep using the same QP\n    {\n        prev_RD = (OsclFloat)prev_actual_bits / prev_mad;\n        //rateCtrl->Qc = (Int)(prev_QP * sqrt(prev_actual_bits/curr_target) + 0.4);\n        if (prev_QP == 0.625) // added this to allow getting out of QP = 0 easily\n        {\n            Qstep = (int)(prev_RD / curr_RD + 0.5);\n        }\n        else\n        {\n            //      rateCtrl->Qc =(Int)(prev_QP * M4VENC_SQRT(prev_RD/curr_RD) + 0.9);\n\n            if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)\n                Qstep = (int)(prev_QP * (oscl_sqrt(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */\n            else\n                Qstep = (int)(prev_QP * (oscl_sqrt(prev_RD / curr_RD) + oscl_pow(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);\n        }\n        // lower bound on Qc should be a function of 
curr_mad\n        // When mad is already low, lower bound on Qc doesn't have to be small.\n        // Note, this doesn't work well for low complexity clip encoded at high bit rate\n        // it doesn't hit the target bit rate due to this QP lower bound.\n        /// if((curr_mad < 8) && (rateCtrl->Qc < 12))   rateCtrl->Qc = 12;\n        //  else    if((curr_mad < 128) && (rateCtrl->Qc < 3)) rateCtrl->Qc = 3;\n\n        rateCtrl->Qc = Qstep2QP(Qstep);\n\n        if (rateCtrl->Qc < RC_MIN_QUANT) rateCtrl->Qc = RC_MIN_QUANT;\n        if (rateCtrl->Qc > RC_MAX_QUANT)    rateCtrl->Qc = RC_MAX_QUANT;\n    }\n\n    /* active bit resource protection */\n    aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (OsclFloat)pMP->encoded_frames);\n    average_mad = (pMP->encoded_frames == 0 ? 0 : pMP->sum_mad / (OsclFloat)pMP->encoded_frames); /* this function is called from the scond encoded frame*/\n    if (pMP->diff_counter == 0 &&\n            ((OsclFloat)rateCtrl->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&\n            pMP->counter_BTsrc <= (pMP->counter_BTdst + (int)(pMP->framerate*1.0 + 0.5)))\n    {\n        rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame / 10.0);\n        rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;\n        pMP->counter_BTsrc++;\n        pMP->diff_counter--;\n    }\n\n}\n\nvoid targetBitCalculation(AVCEncObject *encvid, AVCCommonObj *video, AVCRateControl *rateCtrl, MultiPass *pMP)\n{\n    OSCL_UNUSED_ARG(encvid);\n    OsclFloat curr_mad;//, average_mad;\n    int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;\n    /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */\n\n    /* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/\n    updateRC_PostProc(rateCtrl, pMP);\n\n    /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid interger overflow */\n    if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 
1000)\n    {\n        pMP->counter_BTsrc -= 1000;\n        pMP->counter_BTdst -= 1000;\n    }\n\n    /* ---------------------------------------------------------------------------------------------------*/\n    /* target calculation */\n    curr_mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs;\n    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n    diff_counter_BTsrc = diff_counter_BTdst = 0;\n    pMP->diff_counter = 0;\n\n\n    /*1.calculate average mad */\n    pMP->sum_mad += curr_mad;\n    //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(OsclFloat)(pMP->encoded_frames+1)); /* this function is called from the scond encoded frame*/\n    //pMP->aver_mad = average_mad;\n    if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */\n        pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);\n\n    if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0)\n        pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);\n\n    /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */\n    if (pMP->overlapped_win_size == 0)\n    {\n        /* original verison */\n        if (curr_mad > pMP->aver_mad*1.1)\n        {\n            if (curr_mad / (pMP->aver_mad + 0.0001) > 2)\n                diff_counter_BTdst = (int)(oscl_sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;\n            //diff_counter_BTdst = (int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;\n            else\n                diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;\n        }\n        else /* curr_mad <= average_mad*1.1 */\n            //diff_counter_BTsrc = 10 - (int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);\n            
diff_counter_BTsrc = 10 - (int)(oscl_sqrt(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);\n\n        /* actively fill in the possible gap */\n        if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&\n                curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)\n            diff_counter_BTsrc = 1;\n\n    }\n    else if (pMP->overlapped_win_size > 0)\n    {\n        /* transition time: use previous average mad \"pMP->aver_mad_prev\" instead of the current average mad \"pMP->aver_mad\" */\n        if (curr_mad > pMP->aver_mad_prev*1.1)\n        {\n            if (curr_mad / pMP->aver_mad_prev > 2)\n                diff_counter_BTdst = (int)(oscl_sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;\n            //diff_counter_BTdst = (int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;\n            else\n                diff_counter_BTdst = (int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;\n        }\n        else /* curr_mad <= average_mad*1.1 */\n            //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);\n            diff_counter_BTsrc = 10 - (int)(oscl_sqrt(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);\n\n        /* actively fill in the possible gap */\n        if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&\n                curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)\n            diff_counter_BTsrc = 1;\n\n        if (--pMP->overlapped_win_size <= 0)    pMP->overlapped_win_size = 0;\n    }\n\n\n    /* if difference is too much, do clipping */\n    /* First, set the upper bound for current bit allocation variance: 80% of available buffer */\n    bound = (int)((rateCtrl->Bs / 2 - rateCtrl->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rateCtrl->Bs */\n    diff_counter_BTsrc =  AVC_MIN(diff_counter_BTsrc, 
bound);\n    diff_counter_BTdst =  AVC_MIN(diff_counter_BTdst, bound);\n\n    /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */\n    bound = 50;\n//  if(video->encParams->RC_Type == CBR_LOWDELAY)\n//  not necessary       bound = 10;  -- For Low delay */\n\n    diff_counter_BTsrc =  AVC_MIN(diff_counter_BTsrc, bound);\n    diff_counter_BTdst =  AVC_MIN(diff_counter_BTdst, bound);\n\n\n    /* Third, check the buffer */\n    prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;\n    curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);\n\n    if (AVC_ABS(prev_counter_diff) >= rateCtrl->max_BitVariance_num || AVC_ABS(curr_counter_diff) >= rateCtrl->max_BitVariance_num)\n    {   //diff_counter_BTsrc = diff_counter_BTdst = 0;\n\n        if (curr_counter_diff > rateCtrl->max_BitVariance_num && diff_counter_BTdst)\n        {\n            diff_counter_BTdst = (rateCtrl->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc;\n            if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;\n        }\n\n        else if (curr_counter_diff < -rateCtrl->max_BitVariance_num && diff_counter_BTsrc)\n        {\n            diff_counter_BTsrc = diff_counter_BTdst - (-rateCtrl->max_BitVariance_num - prev_counter_diff);\n            if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;\n        }\n    }\n\n\n    /*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */\n    rateCtrl->TMN_TH = (int)(pMP->target_bits_per_frame);\n    pMP->diff_counter = 0;\n\n    if (diff_counter_BTsrc)\n    {\n        rateCtrl->TMN_TH -= (int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);\n        pMP->diff_counter = -diff_counter_BTsrc;\n    }\n    else if (diff_counter_BTdst)\n    {\n        rateCtrl->TMN_TH += (int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);\n        pMP->diff_counter = diff_counter_BTdst;\n    }\n\n\n    /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */\n    pMP->counter_BTsrc += 
diff_counter_BTsrc;\n    pMP->counter_BTdst += diff_counter_BTdst;\n\n\n    /*5.target bit calculation */\n    rateCtrl->T = rateCtrl->TMN_TH - rateCtrl->TMN_W;\n\n    return ;\n}\n\nvoid updateRC_PostProc(AVCRateControl *rateCtrl, MultiPass *pMP)\n{\n    if (rateCtrl->skip_next_frame > 0) /* skip next frame */\n    {\n        pMP->counter_BTsrc += 10 * rateCtrl->skip_next_frame;\n\n    }\n    else if (rateCtrl->skip_next_frame == -1) /* skip current frame */\n    {\n        pMP->counter_BTdst -= pMP->diff_counter;\n        pMP->counter_BTsrc += 10;\n\n        pMP->sum_mad -= pMP->mad;\n        pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (pMP->encoded_frames - 1 + 0.0001);\n        pMP->sum_QP  -= pMP->QP;\n        pMP->encoded_frames --;\n    }\n    /* some stuff in update VBV_fullness remains here */\n    //if(rateCtrl->VBV_fullness < -rateCtrl->Bs/2) /* rateCtrl->Bs */\n    if (rateCtrl->VBV_fullness < rateCtrl->low_bound)\n    {\n        rateCtrl->VBV_fullness = rateCtrl->low_bound; // -rateCtrl->Bs/2;\n        rateCtrl->TMN_W = rateCtrl->VBV_fullness - rateCtrl->low_bound;\n        pMP->counter_BTsrc = pMP->counter_BTdst + (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n    }\n}\n\n\nvoid RCInitChromaQP(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCMacroblock *currMB = video->currMB;\n    int q_bits;\n\n    /* we have to do the same thing for AVC_CLIP3(0,51,video->QSy) */\n\n    video->QPy_div_6 = (currMB->QPy * 43) >> 8;\n    video->QPy_mod_6 = currMB->QPy - 6 * video->QPy_div_6;\n    currMB->QPc = video->QPc = mapQPi2QPc[AVC_CLIP3(0, 51, currMB->QPy + video->currPicParams->chroma_qp_index_offset)];\n    video->QPc_div_6 = (video->QPc * 43) >> 8;\n    video->QPc_mod_6 = video->QPc - 6 * video->QPc_div_6;\n\n    /* pre-calculate this to save computation */\n    q_bits = 4 + video->QPy_div_6;\n    if (video->slice_type == AVC_I_SLICE)\n    {\n        
encvid->qp_const = 682 << q_bits;       // intra\n    }\n    else\n    {\n        encvid->qp_const = 342 << q_bits;       // inter\n    }\n\n    q_bits = 4 + video->QPc_div_6;\n    if (video->slice_type == AVC_I_SLICE)\n    {\n        encvid->qp_const_c = 682 << q_bits;    // intra\n    }\n    else\n    {\n        encvid->qp_const_c = 342 << q_bits;    // inter\n    }\n\n    encvid->lambda_mode = QP2QUANT[AVC_MAX(0, currMB->QPy-SHIFT_QP)];\n    encvid->lambda_motion = LAMBDA_FACTOR(encvid->lambda_mode);\n\n    return ;\n}\n\n\nvoid RCInitMBQP(AVCEncObject *encvid)\n{\n    AVCCommonObj *video =  encvid->common;\n    AVCMacroblock *currMB = video->currMB;\n\n    currMB->QPy = video->QPy; /* set to previous value or picture level */\n\n    RCInitChromaQP(encvid);\n\n}\n\nvoid RCPostMB(AVCCommonObj *video, AVCRateControl *rateCtrl, int num_header_bits, int num_texture_bits)\n{\n    OSCL_UNUSED_ARG(video);\n    rateCtrl->numMBHeaderBits = num_header_bits;\n    rateCtrl->numMBTextureBits = num_texture_bits;\n    rateCtrl->NumberofHeaderBits += rateCtrl->numMBHeaderBits;\n    rateCtrl->NumberofTextureBits += rateCtrl->numMBTextureBits;\n}\n\nvoid RCRestoreQP(AVCMacroblock *currMB, AVCCommonObj *video, AVCEncObject *encvid)\n{\n    currMB->QPy = video->QPy; /* use previous QP */\n    RCInitChromaQP(encvid);\n\n    return ;\n}\n\n\nvoid RCCalculateMAD(AVCEncObject *encvid, AVCMacroblock *currMB, uint8 *orgL, int orgPitch)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    uint32 dmin_lx;\n\n    if (rateCtrl->rcEnable == TRUE)\n    {\n        if (currMB->mb_intra)\n        {\n            if (currMB->mbMode == AVC_I16)\n            {\n                dmin_lx = (0xFFFF << 16) | orgPitch;\n                rateCtrl->MADofMB[video->mbNum] = AVCSAD_Macroblock_C(orgL,\n                                                  encvid->pred_i16[currMB->i16Mode], dmin_lx, NULL);\n            }\n            else /* i4 */\n            {\n        
        rateCtrl->MADofMB[video->mbNum] = encvid->i4_sad / 256.;\n            }\n        }\n        /* for INTER, we have already saved it with the MV search */\n    }\n\n    return ;\n}\n\n\n\nAVCEnc_Status RCUpdateFrame(AVCEncObject *encvid)\n{\n    AVCCommonObj *video = encvid->common;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    MultiPass *pMP = rateCtrl->pMP;\n    int diff_BTCounter;\n    int nal_type = video->nal_unit_type;\n\n    /* update the complexity weight of I, P, B frame */\n\n    if (rateCtrl->rcEnable == TRUE)\n    {\n        pMP->actual_bits = rateCtrl->numFrameBits;\n        pMP->mad = (OsclFloat)rateCtrl->totalSAD / video->PicSizeInMbs; //ComputeFrameMAD(video, rateCtrl);\n\n        AVCSaveRDSamples(pMP, 0);\n\n        pMP->encoded_frames++;\n\n        /* for pMP->samplesPerFrame */\n        pMP->samplesPerFrame[pMP->framePos] = 0;\n\n        pMP->sum_QP += pMP->QP;\n\n        /* update pMP->counter_BTsrc, pMP->counter_BTdst */\n        /* re-allocate the target bit again and then stop encoding */\n        diff_BTCounter = (int)((OsclFloat)(rateCtrl->TMN_TH - rateCtrl->TMN_W - pMP->actual_bits) /\n                               (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);\n        if (diff_BTCounter >= 0)\n            pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */\n        else\n            pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */\n\n        rateCtrl->TMN_TH -= (int)((OsclFloat)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));\n        rateCtrl->T = pMP->target_bits = rateCtrl->TMN_TH - rateCtrl->TMN_W;\n        pMP->diff_counter -= diff_BTCounter;\n\n        rateCtrl->Rc = rateCtrl->numFrameBits;  /* Total Bits for current frame */\n        rateCtrl->Hc = rateCtrl->NumberofHeaderBits;    /* Total Bits in Header and Motion Vector */\n\n        /* BX_RC */\n        updateRateControl(rateCtrl, nal_type);\n    
    if (rateCtrl->skip_next_frame == -1) // skip current frame\n        {\n            status = AVCENC_SKIPPED_PICTURE;\n        }\n    }\n\n    rateCtrl->first_frame = 0;  // reset here after we encode the first frame.\n\n    return status;\n}\n\nvoid AVCSaveRDSamples(MultiPass *pMP, int counter_samples)\n{\n    /* for pMP->pRDSamples */\n    pMP->pRDSamples[pMP->framePos][counter_samples].QP    = pMP->QP;\n    pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;\n    pMP->pRDSamples[pMP->framePos][counter_samples].mad   = pMP->mad;\n    pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (OsclFloat)pMP->actual_bits / (pMP->mad + 0.0001);\n\n    return ;\n}\n\nvoid updateRateControl(AVCRateControl *rateCtrl, int nal_type)\n{\n    int  frame_bits;\n    MultiPass *pMP = rateCtrl->pMP;\n\n    /* BX rate contro\\l */\n    frame_bits = (int)(rateCtrl->bitRate / rateCtrl->frame_rate);\n    rateCtrl->TMN_W += (rateCtrl->Rc - rateCtrl->TMN_TH);\n    rateCtrl->VBV_fullness += (rateCtrl->Rc - frame_bits); //rateCtrl->Rp);\n    //if(rateCtrl->VBV_fullness < 0) rateCtrl->VBV_fullness = -1;\n\n    rateCtrl->encoded_frames++;\n\n    /* frame dropping */\n    rateCtrl->skip_next_frame = 0;\n\n    if ((rateCtrl->VBV_fullness > rateCtrl->Bs / 2) && nal_type != AVC_NALTYPE_IDR) /* skip the current frame */ /* rateCtrl->Bs */\n    {\n        rateCtrl->TMN_W -= (rateCtrl->Rc - rateCtrl->TMN_TH);\n        rateCtrl->VBV_fullness -= rateCtrl->Rc;\n        rateCtrl->skip_next_frame = -1;\n    }\n    else if ((OsclFloat)(rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95) /* skip next frame */\n    {\n        rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;\n        rateCtrl->skip_next_frame = 1;\n        pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n        /* BX_1, skip more than 1 frames  */\n        
//while(rateCtrl->VBV_fullness > rateCtrl->Bs*0.475)\n        while ((rateCtrl->VBV_fullness - rateCtrl->VBV_fullness_offset) > (rateCtrl->Bs / 2 - rateCtrl->VBV_fullness_offset)*0.95)\n        {\n            rateCtrl->VBV_fullness -= frame_bits; //rateCtrl->Rp;\n            rateCtrl->skip_next_frame++;\n            pMP->counter_BTsrc -= (int)((OsclFloat)(rateCtrl->Bs / 2 - rateCtrl->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n        }\n\n        /* END BX_1 */\n    }\n}\n\n\ndouble ComputeFrameMAD(AVCCommonObj *video, AVCRateControl *rateCtrl)\n{\n    double TotalMAD;\n    int i;\n    TotalMAD = 0.0;\n    for (i = 0; i < (int)video->PicSizeInMbs; i++)\n        TotalMAD += rateCtrl->MADofMB[i];\n    TotalMAD /= video->PicSizeInMbs;\n    return TotalMAD;\n}\n\n\n\n\n\n/* convert from QP to Qstep */\ndouble QP2Qstep(int QP)\n{\n    int i;\n    double Qstep;\n    static const double QP2QSTEP[6] = { 0.625, 0.6875, 0.8125, 0.875, 1.0, 1.125 };\n\n    Qstep = QP2QSTEP[QP % 6];\n    for (i = 0; i < (QP / 6); i++)\n        Qstep *= 2;\n\n    return Qstep;\n}\n\n/* convert from step size to QP */\nint Qstep2QP(double Qstep)\n{\n    int q_per = 0, q_rem = 0;\n\n    //  assert( Qstep >= QP2Qstep(0) && Qstep <= QP2Qstep(51) );\n    if (Qstep < QP2Qstep(0))\n        return 0;\n    else if (Qstep > QP2Qstep(51))\n        return 51;\n\n    while (Qstep > QP2Qstep(5))\n    {\n        Qstep /= 2;\n        q_per += 1;\n    }\n\n    if (Qstep <= (0.625 + 0.6875) / 2)\n    {\n        Qstep = 0.625;\n        q_rem = 0;\n    }\n    else if (Qstep <= (0.6875 + 0.8125) / 2)\n    {\n        Qstep = 0.6875;\n        q_rem = 1;\n    }\n    else if (Qstep <= (0.8125 + 0.875) / 2)\n    {\n        Qstep = 0.8125;\n        q_rem = 2;\n    }\n    else if (Qstep <= (0.875 + 1.0) / 2)\n    {\n        Qstep = 0.875;\n        q_rem = 3;\n    }\n    else if (Qstep <= (1.0 + 1.125) / 2)\n    {\n        Qstep = 1.0;\n        q_rem = 4;\n    }\n    else\n    {\n        Qstep = 1.125;\n       
 q_rem = 5;\n    }\n\n    return (q_per * 6 + q_rem);\n}\n\n\nvoid RCUpdateParams(AVCRateControl *rateCtrl, AVCEncObject *encvid)\n{\n    int32 prevFrameNum, newFrameNum;\n    uint32 prevModTime;\n\n    if (rateCtrl->frame_rate != rateCtrl->pMP->framerate)\n    {\n        /* this part for frame rate change */\n\n        rateCtrl->pMP->frameRange = (int)(rateCtrl->frame_rate * 1.0); /* 1.0s time frame*/\n        rateCtrl->pMP->frameRange = AVC_MAX(rateCtrl->pMP->frameRange, 5);\n        rateCtrl->pMP->frameRange = AVC_MIN(rateCtrl->pMP->frameRange, 30);\n\n        prevFrameNum = encvid->prevProcFrameNum;  // previous frame number\n\n        // convert from frame num to time based on the previous frame rate\n        prevModTime = (uint32)(prevFrameNum * 1000 / rateCtrl->pMP->framerate);  // offseted by modTimeRef\n\n        // convert back from time to frame num based on new frame rate\n        newFrameNum = (int32)((prevModTime * rateCtrl->frame_rate) / 1000);\n\n        // assign the newFrameNum to prevFrameNum\n        // note, this will cause the IDR frame to come earlier and later than expected !!\n        encvid->prevProcFrameNum = newFrameNum;\n    }\n\n    // recalculate fixed values that are dependent on bitrate and framerate\n\n    rateCtrl->bitsPerFrame = (int32)(rateCtrl->bitRate / rateCtrl->frame_rate);\n\n    rateCtrl->max_BitVariance_num = (int)((OsclFloat)(rateCtrl->Bs - rateCtrl->VBV_fullness) / (rateCtrl->bitsPerFrame / 10.0)) - 5;\n    if (rateCtrl->max_BitVariance_num < 0) rateCtrl->max_BitVariance_num += 5;\n\n    /* no change to rateCtrl->cpbSize, rateCtrl->Bs, rateCtrl->low_bound, rateCtrl->VBV_fullness_offset*/\n\n    /* keep continuity to the following values */\n    /* rateCtrl->pMP->framePos, rateCtrl->TMN_TH, rateCtrl->TMN_W */\n    /* rateCtrl->VBV_fullness, rateCtrl->pMP->counter_BTsrc, */\n\n    /* reset some stats for CalculateQuantizerMultiPass and active bit resource protection */\n    rateCtrl->pMP->sum_QP /= 
rateCtrl->pMP->encoded_frames;  // reset it to 1\n    rateCtrl->pMP->encoded_frames = 1;\n    rateCtrl->pMP->sum_mad = 0;\n    rateCtrl->T = 0;\n\n    /* Finalizing bitrate and framerate to pMP structure*/\n    rateCtrl->pMP->bitrate = rateCtrl->bitRate;\n    rateCtrl->pMP->framerate = rateCtrl->frame_rate;\n    rateCtrl->pMP->target_bits_per_frame = rateCtrl->pMP->bitrate / rateCtrl->pMP->framerate;\n\n    return ;\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/residual.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"oscl_mem.h\"\n\nAVCEnc_Status EncodeIntraPCM(AVCEncObject *encvid)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    AVCCommonObj *video = encvid->common;\n    AVCFrameIO  *currInput = encvid->currInput;\n    AVCEncBitstream *stream = encvid->bitstream;\n    int x_position = (video->mb_x << 4);\n    int y_position = (video->mb_y << 4);\n    int orgPitch = currInput->pitch;\n    int offset1 = y_position * orgPitch + x_position;\n    int i, j;\n    int offset;\n    uint8 *pDst, *pSrc;\n    uint code;\n\n    ue_v(stream, 25);\n\n    i = stream->bit_left & 0x7;\n    if (i) /* not byte-aligned */\n    {\n        BitstreamWriteBits(stream, 0, i);\n    }\n\n    pSrc = currInput->YCbCr[0] + offset1;\n    pDst = video->currPic->Sl + offset1;\n    offset = video->PicWidthInSamplesL - 16;\n\n    /* at this point bitstream is byte-aligned */\n    j = 16;\n    while (j > 0)\n    {\n#if (WORD_SIZE==32)\n        for (i = 0; i < 4; i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 4;\n            *((uint*)pDst) = code;\n            pDst += 4;\n            status = BitstreamWriteBits(stream, 32, code);\n        }\n#else\n        for (i = 0; i < 8; 
i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 2;\n            *((uint*)pDst) = code;\n            pDst += 2;\n            status = BitstreamWriteBits(stream, 16, code);\n        }\n#endif\n        pDst += offset;\n        pSrc += offset;\n        j--;\n    }\n    if (status != AVCENC_SUCCESS)  /* check only once per line */\n        return status;\n\n    pDst = video->currPic->Scb + ((offset1 + x_position) >> 2);\n    pSrc = currInput->YCbCr[1] + ((offset1 + x_position) >> 2);\n    offset >>= 1;\n\n    j = 8;\n    while (j > 0)\n    {\n#if (WORD_SIZE==32)\n        for (i = 0; i < 2; i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 4;\n            *((uint*)pDst) = code;\n            pDst += 4;\n            status = BitstreamWriteBits(stream, 32, code);\n        }\n#else\n        for (i = 0; i < 4; i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 2;\n            *((uint*)pDst) = code;\n            pDst += 2;\n            status = BitstreamWriteBits(stream, 16, code);\n        }\n#endif\n        pDst += offset;\n        pSrc += offset;\n        j--;\n    }\n\n    if (status != AVCENC_SUCCESS)  /* check only once per line */\n        return status;\n\n    pDst = video->currPic->Scr + ((offset1 + x_position) >> 2);\n    pSrc = currInput->YCbCr[2] + ((offset1 + x_position) >> 2);\n\n    j = 8;\n    while (j > 0)\n    {\n#if (WORD_SIZE==32)\n        for (i = 0; i < 2; i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 4;\n            *((uint*)pDst) = code;\n            pDst += 4;\n            status = BitstreamWriteBits(stream, 32, code);\n        }\n#else\n        for (i = 0; i < 4; i++)\n        {\n            code = *((uint*)pSrc);\n            pSrc += 2;\n            *((uint*)pDst) = code;\n            pDst += 2;\n            status = BitstreamWriteBits(stream, 16, code);\n        }\n#endif\n        pDst += offset;\n        pSrc += offset;\n        j--;\n    }\n\n    
return status;\n}\n\n\nAVCEnc_Status enc_residual_block(AVCEncObject *encvid, AVCResidualType type, int cindx, AVCMacroblock *currMB)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    AVCCommonObj *video = encvid->common;\n    int i, maxNumCoeff, nC;\n    int cdc = 0, cac = 0;\n    int TrailingOnes;\n    AVCEncBitstream *stream = encvid->bitstream;\n    uint trailing_ones_sign_flag;\n    int zerosLeft;\n    int *level, *run;\n    int TotalCoeff;\n    const static int incVlc[] = {0, 3, 6, 12, 24, 48, 32768};  // maximum vlc = 6\n    int escape, numPrefix, sufmask, suffix, shift, sign, value, absvalue, vlcnum, level_two_or_higher;\n    int bindx = blkIdx2blkXY[cindx>>2][cindx&3] ; // raster scan index\n\n    switch (type)\n    {\n        case AVC_Luma:\n            maxNumCoeff = 16;\n            level = encvid->level[cindx];\n            run = encvid->run[cindx];\n            TotalCoeff = currMB->nz_coeff[bindx];\n            break;\n        case AVC_Intra16DC:\n            maxNumCoeff = 16;\n            level = encvid->leveldc;\n            run = encvid->rundc;\n            TotalCoeff = cindx; /* special case */\n            bindx = 0;\n            cindx = 0;\n            break;\n        case AVC_Intra16AC:\n            maxNumCoeff = 15;\n            level = encvid->level[cindx];\n            run = encvid->run[cindx];\n            TotalCoeff = currMB->nz_coeff[bindx];\n            break;\n        case AVC_ChromaDC:  /* how to differentiate Cb from Cr */\n            maxNumCoeff = 4;\n            cdc = 1;\n            if (cindx >= 8)\n            {\n                level = encvid->levelcdc + 4;\n                run = encvid->runcdc + 4;\n                TotalCoeff = cindx - 8;  /* special case */\n            }\n            else\n            {\n                level = encvid->levelcdc;\n                run = encvid->runcdc;\n                TotalCoeff = cindx;  /* special case */\n            }\n            break;\n        case AVC_ChromaAC:\n            
maxNumCoeff = 15;\n            cac = 1;\n            level = encvid->level[cindx];\n            run = encvid->run[cindx];\n            cindx -= 16;\n            bindx = 16 + blkIdx2blkXY[cindx>>2][cindx&3];\n            cindx += 16;\n            TotalCoeff = currMB->nz_coeff[bindx];\n            break;\n        default:\n            return AVCENC_FAIL;\n    }\n\n\n    /* find TrailingOnes */\n    TrailingOnes = 0;\n    zerosLeft = 0;\n    i = TotalCoeff - 1;\n    nC = 1;\n    while (i >= 0)\n    {\n        zerosLeft += run[i];\n        if (nC && (level[i] == 1 || level[i] == -1))\n        {\n            TrailingOnes++;\n        }\n        else\n        {\n            nC = 0;\n        }\n        i--;\n    }\n    if (TrailingOnes > 3)\n    {\n        TrailingOnes = 3; /* clip it */\n    }\n\n    if (!cdc)\n    {\n        if (!cac)  /* not chroma */\n        {\n            nC = predict_nnz(video, bindx & 3, bindx >> 2);\n        }\n        else /* chroma ac but not chroma dc */\n        {\n            nC = predict_nnz_chroma(video, bindx & 3, bindx >> 2);\n        }\n\n        status = ce_TotalCoeffTrailingOnes(stream, TrailingOnes, TotalCoeff, nC);\n    }\n    else\n    {\n        nC = -1; /* Chroma DC level */\n        status = ce_TotalCoeffTrailingOnesChromaDC(stream, TrailingOnes, TotalCoeff);\n    }\n\n    /* This part is done quite differently in ReadCoef4x4_CAVLC() */\n    if (TotalCoeff > 0)\n    {\n\n        i = TotalCoeff - 1;\n\n        if (TrailingOnes) /* keep reading the sign of those trailing ones */\n        {\n            nC = TrailingOnes;\n            trailing_ones_sign_flag = 0;\n            while (nC)\n            {\n                trailing_ones_sign_flag <<= 1;\n                trailing_ones_sign_flag |= ((uint32)level[i--] >> 31); /* 0 or positive, 1 for negative */\n                nC--;\n            }\n\n            /* instead of writing one bit at a time, read the whole thing at once */\n            status = BitstreamWriteBits(stream, 
TrailingOnes, trailing_ones_sign_flag);\n        }\n\n        level_two_or_higher = 1;\n        if (TotalCoeff > 3 && TrailingOnes == 3)\n        {\n            level_two_or_higher = 0;\n        }\n\n        if (TotalCoeff > 10 && TrailingOnes < 3)\n        {\n            vlcnum = 1;\n        }\n        else\n        {\n            vlcnum = 0;\n        }\n\n        /* then do this TotalCoeff-TrailingOnes times */\n        for (i = TotalCoeff - TrailingOnes - 1; i >= 0; i--)\n        {\n            value = level[i];\n            absvalue = (value >= 0) ? value : -value;\n\n            if (level_two_or_higher)\n            {\n                if (value > 0) value--;\n                else    value++;\n                level_two_or_higher = 0;\n            }\n\n            if (value >= 0)\n            {\n                sign = 0;\n            }\n            else\n            {\n                sign = 1;\n                value = -value;\n            }\n\n            if (vlcnum == 0) // VLC1\n            {\n                if (value < 8)\n                {\n                    status = BitstreamWriteBits(stream, value * 2 + sign - 1, 1);\n                }\n                else if (value < 8 + 8)\n                {\n                    status = BitstreamWriteBits(stream, 14 + 1 + 4, (1 << 4) | ((value - 8) << 1) | sign);\n                }\n                else\n                {\n                    status = BitstreamWriteBits(stream, 14 + 2 + 12, (1 << 12) | ((value - 16) << 1) | sign) ;\n                }\n            }\n            else  // VLCN\n            {\n                shift = vlcnum - 1;\n                escape = (15 << shift) + 1;\n                numPrefix = (value - 1) >> shift;\n                sufmask = ~((0xffffffff) << shift);\n                suffix = (value - 1) & sufmask;\n                if (value < escape)\n                {\n                    status = BitstreamWriteBits(stream, numPrefix + vlcnum + 1, (1 << (shift + 1)) | (suffix << 1) | 
sign);\n                }\n                else\n                {\n                    status = BitstreamWriteBits(stream, 28, (1 << 12) | ((value - escape) << 1) | sign);\n                }\n\n            }\n\n            if (absvalue > incVlc[vlcnum])\n                vlcnum++;\n\n            if (i == TotalCoeff - TrailingOnes - 1 && absvalue > 3)\n                vlcnum = 2;\n        }\n\n        if (status != AVCENC_SUCCESS)  /* occasionally check the bitstream */\n        {\n            return status;\n        }\n        if (TotalCoeff < maxNumCoeff)\n        {\n            if (!cdc)\n            {\n                ce_TotalZeros(stream, zerosLeft, TotalCoeff);\n            }\n            else\n            {\n                ce_TotalZerosChromaDC(stream, zerosLeft, TotalCoeff);\n            }\n        }\n        else\n        {\n            zerosLeft = 0;\n        }\n\n        i = TotalCoeff - 1;\n        while (i > 0) /* don't do the last one */\n        {\n            if (zerosLeft > 0)\n            {\n                ce_RunBefore(stream, run[i], zerosLeft);\n            }\n\n            zerosLeft = zerosLeft - run[i];\n            i--;\n        }\n    }\n\n    return status;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/sad.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n#include \"sad_inline.h\"\n\n#define Cached_lx 176\n\n#ifdef _SAD_STAT\nuint32 num_sad_MB = 0;\nuint32 num_sad_Blk = 0;\nuint32 num_sad_MB_call = 0;\nuint32 num_sad_Blk_call = 0;\n\n#define NUM_SAD_MB_CALL()       num_sad_MB_call++\n#define NUM_SAD_MB()            num_sad_MB++\n#define NUM_SAD_BLK_CALL()      num_sad_Blk_call++\n#define NUM_SAD_BLK()           num_sad_Blk++\n\n#else\n\n#define NUM_SAD_MB_CALL()\n#define NUM_SAD_MB()\n#define NUM_SAD_BLK_CALL()\n#define NUM_SAD_BLK()\n\n#endif\n\n\n/* consist of\nint AVCSAD_Macroblock_C(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)\nint AVCSAD_MB_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)\nint AVCSAD_MB_HTFM(uint8 *ref,uint8 *blk,int dmin,int lx,void *extra_info)\n*/\n\n\n/*==================================================================\n    Function:   SAD_Macroblock\n    Date:       09/07/2000\n    Purpose:    Compute SAD 16x16 between blk and ref.\n    To do:      Uniform subsampling will be inserted later!\n                Hypothesis Testing Fast Matching to be used later!\n    Changes:\n    11/7/00:    implemented MMX\n    1/24/01:    implemented 
SSE\n==================================================================*/\n/********** C ************/\nint AVCSAD_Macroblock_C(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)\n{\n    (void)(extra_info);\n\n    int32 x10;\n    int dmin = (uint32)dmin_lx >> 16;\n    int lx = dmin_lx & 0xFFFF;\n\n    NUM_SAD_MB_CALL();\n\n    x10 = simd_sad_mb(ref, blk, dmin, lx);\n\n    return x10;\n}\n\n#ifdef HTFM   /* HTFM with uniform subsampling implementation 2/28/01 */\n/*===============================================================\n    Function:   AVCAVCSAD_MB_HTFM_Collect and AVCSAD_MB_HTFM\n    Date:       3/2/1\n    Purpose:    Compute the SAD on a 16x16 block using\n                uniform subsampling and hypothesis testing fast matching\n                for early dropout. SAD_MB_HP_HTFM_Collect is to collect\n                the statistics to compute the thresholds to be used in\n                SAD_MB_HP_HTFM.\n    Input/Output:\n    Changes:\n  ===============================================================*/\n\nint AVCAVCSAD_MB_HTFM_Collect(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)\n{\n    int i;\n    int sad = 0;\n    uint8 *p1;\n    int lx4 = (dmin_lx << 2) & 0x3FFFC;\n    uint32 cur_word;\n    int saddata[16], tmp, tmp2;    /* used when collecting flag (global) is on */\n    int difmad;\n    int madstar;\n    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n    uint *countbreak = &(htfm_stat->countbreak);\n    int *offsetRef = htfm_stat->offsetRef;\n\n    madstar = (uint32)dmin_lx >> 20;\n\n    NUM_SAD_MB_CALL();\n\n    blk -= 4;\n    for (i = 0; i < 16; i++)\n    {\n        p1 = ref + offsetRef[i];\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = 
(cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        NUM_SAD_MB();\n\n        saddata[i] = sad;\n\n        if (i > 0)\n        {\n            if ((uint32)sad > ((uint32)dmin_lx >> 16))\n            {\n                difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                (*abs_dif_mad_avg) += ((difmad > 0) ? 
difmad : -difmad);\n                (*countbreak)++;\n                return sad;\n            }\n        }\n    }\n\n    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n    (*countbreak)++;\n    return sad;\n}\n\nint AVCSAD_MB_HTFM(uint8 *ref, uint8 *blk, int dmin_lx, void *extra_info)\n{\n    int sad = 0;\n    uint8 *p1;\n\n    int i;\n    int tmp, tmp2;\n    int lx4 = (dmin_lx << 2) & 0x3FFFC;\n    int sadstar = 0, madstar;\n    int *nrmlz_th = (int*) extra_info;\n    int *offsetRef = (int*) extra_info + 32;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_lx >> 20;\n\n    NUM_SAD_MB_CALL();\n\n    blk -= 4;\n    for (i = 0; i < 16; i++)\n    {\n        p1 = ref + offsetRef[i];\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word 
>> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        cur_word = *((uint32*)(blk += 4));\n        tmp = p1[12];\n        tmp2 = (cur_word >> 24) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[8];\n        tmp2 = (cur_word >> 16) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[4];\n        tmp2 = (cur_word >> 8) & 0xFF;\n        sad = SUB_SAD(sad, tmp, tmp2);\n        tmp = p1[0];\n        p1 += lx4;\n        tmp2 = (cur_word & 0xFF);\n        sad = SUB_SAD(sad, tmp, tmp2);\n\n        NUM_SAD_MB();\n\n        sadstar += madstar;\n        if (((uint32)sad <= ((uint32)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))\n            ;\n        else\n            return 65536;\n    }\n\n    return sad;\n}\n#endif /* HTFM */\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/sad_halfpel.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/* contains\nint AVCHalfPel1_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)\nint AVCHalfPel2_SAD_MB(uint8 *ref,uint8 *blk,int dmin,int width)\nint AVCHalfPel1_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width,int ih,int jh)\nint AVCHalfPel2_SAD_Blk(uint8 *ref,uint8 *blk,int dmin,int width)\n\nint AVCSAD_MB_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)\nint AVCSAD_MB_HP_HTFM_Collect(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)\nint AVCSAD_MB_HP_HTFM(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)\nint AVCSAD_Blk_HalfPel_C(uint8 *ref,uint8 *blk,int dmin,int width,int rx,int xh,int yh,void *extra_info)\n*/\n\n#include \"avcenc_lib.h\"\n#include \"sad_halfpel_inline.h\"\n\n#ifdef _SAD_STAT\nuint32 num_sad_HP_MB = 0;\nuint32 num_sad_HP_Blk = 0;\nuint32 num_sad_HP_MB_call = 0;\nuint32 num_sad_HP_Blk_call = 0;\n#define NUM_SAD_HP_MB_CALL()    num_sad_HP_MB_call++\n#define NUM_SAD_HP_MB()         num_sad_HP_MB++\n#define NUM_SAD_HP_BLK_CALL()   num_sad_HP_Blk_call++\n#define NUM_SAD_HP_BLK()        num_sad_HP_Blk++\n#else\n#define NUM_SAD_HP_MB_CALL()\n#define 
NUM_SAD_HP_MB()\n#define NUM_SAD_HP_BLK_CALL()\n#define NUM_SAD_HP_BLK()\n#endif\n\n\n\n/*===============================================================\n    Function:   SAD_MB_HalfPel\n    Date:       09/17/2000\n    Purpose:    Compute the SAD on the half-pel resolution\n    Input/Output:   hmem is assumed to be a pointer to the starting\n                point of the search in the 33x33 matrix search region\n    Changes:\n    11/7/00:    implemented MMX\n  ===============================================================*/\n/*==================================================================\n    Function:   AVCSAD_MB_HalfPel_C\n    Date:       04/30/2001\n    Purpose:    Compute SAD 16x16 between blk and ref in halfpel\n                resolution,\n    Changes:\n  ==================================================================*/\n/* One component is half-pel */\nint AVCSAD_MB_HalfPel_Cxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    (void)(extra_info);\n\n    int i, j;\n    int sad = 0;\n    uint8 *kk, *p1, *p2, *p3, *p4;\n//  int sumref=0;\n    int temp;\n    int rx = dmin_rx & 0xFFFF;\n\n    NUM_SAD_HP_MB_CALL();\n\n    p1 = ref;\n    p2 = ref + 1;\n    p3 = ref + rx;\n    p4 = ref + rx + 1;\n    kk  = blk;\n\n    for (i = 0; i < 16; i++)\n    {\n        for (j = 0; j < 16; j++)\n        {\n\n            temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;\n            sad += AVC_ABS(temp);\n        }\n\n        NUM_SAD_HP_MB();\n\n        if (sad > (int)((uint32)dmin_rx >> 16))\n            return sad;\n\n        p1 += rx;\n        p3 += rx;\n        p2 += rx;\n        p4 += rx;\n    }\n    return sad;\n}\n\nint AVCSAD_MB_HalfPel_Cyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    (void)(extra_info);\n\n    int i, j;\n    int sad = 0;\n    uint8 *kk, *p1, *p2;\n//  int sumref=0;\n    int temp;\n    int rx = dmin_rx & 0xFFFF;\n\n    NUM_SAD_HP_MB_CALL();\n\n    p1 = ref;\n    p2 = ref + rx; /* either left/right or 
top/bottom pixel */\n    kk  = blk;\n\n    for (i = 0; i < 16; i++)\n    {\n        for (j = 0; j < 16; j++)\n        {\n\n            temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;\n            sad += AVC_ABS(temp);\n        }\n\n        NUM_SAD_HP_MB();\n\n        if (sad > (int)((uint32)dmin_rx >> 16))\n            return sad;\n        p1 += rx;\n        p2 += rx;\n    }\n    return sad;\n}\n\nint AVCSAD_MB_HalfPel_Cxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    (void)(extra_info);\n\n    int i, j;\n    int sad = 0;\n    uint8 *kk, *p1;\n    int temp;\n    int rx = dmin_rx & 0xFFFF;\n\n    NUM_SAD_HP_MB_CALL();\n\n    p1 = ref;\n    kk  = blk;\n\n    for (i = 0; i < 16; i++)\n    {\n        for (j = 0; j < 16; j++)\n        {\n\n            temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;\n            sad += AVC_ABS(temp);\n        }\n\n        NUM_SAD_HP_MB();\n\n        if (sad > (int)((uint32)dmin_rx >> 16))\n            return sad;\n        p1 += rx;\n    }\n    return sad;\n}\n\n#ifdef HTFM  /* HTFM with uniform subsampling implementation,  2/28/01 */\n\n//Checheck here\nint AVCAVCSAD_MB_HP_HTFM_Collectxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0;\n    uint8 *p1, *p2;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int saddata[16];      /* used when collecting flag (global) is on */\n    int difmad, tmp, tmp2;\n    int madstar;\n    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n    UInt *countbreak = &(htfm_stat->countbreak);\n    int *offsetRef = htfm_stat->offsetRef;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n        p2 = p1 + rx;\n\n        j = 4;/* 4 lines */\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12] + 
p2[12];\n            tmp2 = p1[13] + p2[13];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 24) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8] + p2[8];\n            tmp2 = p1[9] + p2[9];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 16) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4] + p2[4];\n            tmp2 = p1[5] + p2[5];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 8) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp2 = p1[1] + p2[1];\n            tmp = p1[0] + p2[0];\n            p1 += refwx4;\n            p2 += refwx4;\n            tmp += tmp2;\n            tmp2 = (cur_word & 0xFF);\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        NUM_SAD_HP_MB();\n\n        saddata[i] = sad;\n\n        if (i > 0)\n        {\n            if (sad > ((uint32)dmin_rx >> 16))\n            {\n                difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                (*countbreak)++;\n                return sad;\n            }\n        }\n    }\n    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n    (*abs_dif_mad_avg) += ((difmad > 0) ? 
difmad : -difmad);\n    (*countbreak)++;\n\n    return sad;\n}\n\nint AVCAVCSAD_MB_HP_HTFM_Collectyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0;\n    uint8 *p1, *p2;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int saddata[16];      /* used when collecting flag (global) is on */\n    int difmad, tmp, tmp2;\n    int madstar;\n    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n    UInt *countbreak = &(htfm_stat->countbreak);\n    int *offsetRef = htfm_stat->offsetRef;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n        p2 = p1 + rx;\n        j = 4;\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = p2[12];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 24) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8];\n            tmp2 = p2[8];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 16) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4];\n            tmp2 = p2[4];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 8) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[0];\n            p1 += refwx4;\n            tmp2 = p2[0];\n            p2 += refwx4;\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word & 0xFF);\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        NUM_SAD_HP_MB();\n\n        saddata[i] = sad;\n\n        if (i > 0)\n        {\n            if (sad > ((uint32)dmin_rx >> 16))\n            {\n                difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n           
     (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                (*countbreak)++;\n                return sad;\n            }\n        }\n    }\n    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n    (*countbreak)++;\n\n    return sad;\n}\n\nint AVCAVCSAD_MB_HP_HTFM_Collectxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0;\n    uint8 *p1;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int saddata[16];      /* used when collecting flag (global) is on */\n    int difmad, tmp, tmp2;\n    int madstar;\n    HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n    int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n    UInt *countbreak = &(htfm_stat->countbreak);\n    int *offsetRef = htfm_stat->offsetRef;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n\n        j = 4; /* 4 lines */\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = p1[13];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 24) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8];\n            tmp2 = p1[9];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 16) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4];\n            tmp2 = p1[5];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 8) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[0];\n            tmp2 = p1[1];\n            p1 += refwx4;\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word & 0xFF);\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        
NUM_SAD_HP_MB();\n\n        saddata[i] = sad;\n\n        if (i > 0)\n        {\n            if (sad > ((uint32)dmin_rx >> 16))\n            {\n                difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                (*countbreak)++;\n                return sad;\n            }\n        }\n    }\n    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n    (*countbreak)++;\n\n    return sad;\n}\n\nint AVCSAD_MB_HP_HTFMxhyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0, tmp, tmp2;\n    uint8 *p1, *p2;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int sadstar = 0, madstar;\n    int *nrmlz_th = (int*) extra_info;\n    int *offsetRef = nrmlz_th + 32;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n        p2 = p1 + rx;\n\n        j = 4; /* 4 lines */\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12] + p2[12];\n            tmp2 = p1[13] + p2[13];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 24) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8] + p2[8];\n            tmp2 = p1[9] + p2[9];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 16) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4] + p2[4];\n            tmp2 = p1[5] + p2[5];\n            tmp += tmp2;\n            tmp2 = (cur_word >> 8) & 0xFF;\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            tmp2 = p1[1] + p2[1];\n            tmp = p1[0] + p2[0];\n            p1 += refwx4;\n            p2 += refwx4;\n            tmp += tmp2;\n            tmp2 = 
(cur_word & 0xFF);\n            tmp += 2;\n            sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        NUM_SAD_HP_MB();\n\n        sadstar += madstar;\n        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))\n        {\n            return 65536;\n        }\n    }\n\n    return sad;\n}\n\nint AVCSAD_MB_HP_HTFMyh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0, tmp, tmp2;\n    uint8 *p1, *p2;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int sadstar = 0, madstar;\n    int *nrmlz_th = (int*) extra_info;\n    int *offsetRef = nrmlz_th + 32;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n        p2 = p1 + rx;\n        j = 4;\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = p2[12];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 24) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8];\n            tmp2 = p2[8];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 16) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4];\n            tmp2 = p2[4];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 8) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[0];\n            p1 += refwx4;\n            tmp2 = p2[0];\n            p2 += refwx4;\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word & 0xFF);\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        NUM_SAD_HP_MB();\n        sadstar += madstar;\n        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))\n        {\n            return 
65536;\n        }\n    }\n\n    return sad;\n}\n\nint AVCSAD_MB_HP_HTFMxh(uint8 *ref, uint8 *blk, int dmin_rx, void *extra_info)\n{\n    int i, j;\n    int sad = 0, tmp, tmp2;\n    uint8 *p1;\n    int rx = dmin_rx & 0xFFFF;\n    int refwx4 = rx << 2;\n    int sadstar = 0, madstar;\n    int *nrmlz_th = (int*) extra_info;\n    int *offsetRef = nrmlz_th + 32;\n    uint32 cur_word;\n\n    madstar = (uint32)dmin_rx >> 20;\n\n    NUM_SAD_HP_MB_CALL();\n\n    blk -= 4;\n\n    for (i = 0; i < 16; i++) /* 16 stages */\n    {\n        p1 = ref + offsetRef[i];\n\n        j = 4;/* 4 lines */\n        do\n        {\n            cur_word = *((uint32*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = p1[13];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 24) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[8];\n            tmp2 = p1[9];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 16) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[4];\n            tmp2 = p1[5];\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word >> 8) & 0xFF;\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            tmp = p1[0];\n            tmp2 = p1[1];\n            p1 += refwx4;\n            tmp++;\n            tmp2 += tmp;\n            tmp = (cur_word & 0xFF);\n            sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n        }\n        while (--j);\n\n        NUM_SAD_HP_MB();\n\n        sadstar += madstar;\n        if (sad > sadstar - nrmlz_th[i] || sad > ((uint32)dmin_rx >> 16))\n        {\n            return 65536;\n        }\n    }\n\n    return sad;\n}\n\n#endif /* HTFM */\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/sad_halfpel_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef _SAD_HALFPEL_INLINE_H_\n#define _SAD_HALFPEL_INLINE_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = (tmp2 >> 1) - tmp;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = (tmp >> 2) - tmp2;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, tmp, tmp2, asr #1 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, tmp2, tmp, asr #2 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n#elif defined(__GNUC__) && 
defined(__arm__) /* ARM GNU COMPILER  */\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n__asm__ volatile(\"rsbs  %1, %1, %2, asr #1\\n\\trsbmi %1, %1, #0\\n\\tadd  %0, %0, %1\": \"=r\"(sad), \"=r\"(tmp): \"r\"(tmp2));\n\n        return sad;\n    }\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n__asm__ volatile(\"rsbs  %1, %2, %1, asr #2\\n\\trsbmi %1, %1, #0\\n\\tadd  %0, %0, %1\": \"=r\"(sad), \"=r\"(tmp): \"r\"(tmp2));\n\n        return sad;\n    }\n\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif //_SAD_HALFPEL_INLINE_H_\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/sad_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _SAD_INLINE_H_\n#define _SAD_INLINE_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = tmp - tmp2;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        x7 = src2 ^ src1;       /* check odd/even combination */\n        if ((uint32)src2 >= (uint32)src1)\n        {\n            src1 = src2 - src1;     /* subs */\n        }\n        else\n        {\n            src1 = src1 - src2;\n        }\n        x7 = x7 ^ src1;     /* only odd bytes need to add carry */\n        x7 = mask & ((uint32)x7 >> 1);\n        x7 = (x7 << 8) - x7;\n        src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */\n        src1 = src1 ^(x7 >> 7);   /* take absolute value of negative byte */\n\n        return src1;\n    }\n\n#define NUMBER 3\n#define SHIFT 24\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#include \"sad_mb_offset.h\"\n\n#undef 
NUMBER\n#define NUMBER 1\n#undef SHIFT\n#define SHIFT 8\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)\n    {\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. */\n\n        x8 = (uint32)ref & 0x3;\n        if (x8 == 3)\n            goto SadMBOffset3;\n        if (x8 == 2)\n            goto SadMBOffset2;\n        if (x8 == 1)\n            goto SadMBOffset1;\n\n//  x5 = (x4<<8)-x4; /* x5 = x4*255; */\n        x4 = x5 = 0;\n\n        x6 = 0xFFFF00FF;\n\n        ref -= lx;\n        blk -= 16;\n\n        x8 = 16;\n\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x10 = *((uint32*)(ref += lx));\n        x11 = *((uint32*)(ref + 4));\n        x12 = *((uint32*)(blk += 16));\n        x14 = *((uint32*)(blk + 4));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10; /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****** process 8 pixels ******/\n        x10 = *((uint32*)(ref + 8));\n        x11 = *((uint32*)(ref + 12));\n        x12 = *((uint32*)(blk + 8));\n        x14 = *((uint32*)(blk + 12));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 
>> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add with lower half word */\n\n        if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */\n        {\n            if (--x8)\n            {\n                goto LOOP_SAD0;\n            }\n\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin);\n\n    }\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, tmp, tmp2 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        __asm\n        {\n            EOR     x7, src2, src1;     /* check odd/even combination */\n            SUBS    src1, src2, src1;\n            EOR     x7, x7, src1;\n            AND     x7, mask, x7, lsr #1;\n            ORRCC   x7, x7, #0x80000000;\n            RSB     x7, x7, x7, lsl #8;\n            ADD     src1, src1, x7, asr #7;   /* add 0xFF to the negative byte, add back carry */\n            EOR     src1, src1, x7, asr #7;   /* take absolute value of negative byte */\n        }\n\n        return src1;\n    }\n\n    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        __asm\n        {\n            EOR      x7, src2, src1;        /* check odd/even combination */\n            ADDS     src1, src2, src1;\n            EOR      x7, x7, src1;      /* only odd bytes need to add carry */\n            ANDS     x7, mask, x7, rrx;\n            RSB   
   x7, x7, x7, lsl #8;\n            SUB      src1, src1, x7, asr #7;  /* add 0xFF to the negative byte, add back carry */\n            EOR      src1, src1, x7, asr #7; /* take absolute value of negative byte */\n        }\n\n        return src1;\n    }\n\n#define sum_accumulate  __asm{      SBC      x5, x5, x10;  /* accumulate low bytes */ \\\n        BIC      x10, x6, x10;   /* x10 & 0xFF00FF00 */ \\\n        ADD      x4, x4, x10,lsr #8;   /* accumulate high bytes */ \\\n        SBC      x5, x5, x11;    /* accumulate low bytes */ \\\n        BIC      x11, x6, x11;   /* x11 & 0xFF00FF00 */ \\\n        ADD      x4, x4, x11,lsr #8; } /* accumulate high bytes */\n\n\n#define NUMBER 3\n#define SHIFT 24\n#define INC_X8 0x08000001\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#undef INC_X8\n#define INC_X8 0x10000001\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 1\n#undef SHIFT\n#define SHIFT 8\n#undef INC_X8\n#define INC_X8 0x08000001\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)\n    {\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. 
*/\n        x4 = x5 = 0;\n\n        __asm\n        {\n            MOVS    x8, ref, lsl #31 ;\n            BHI     SadMBOffset3;\n            BCS     SadMBOffset2;\n            BMI     SadMBOffset1;\n\n            MVN     x6, #0xFF00;\n        }\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x11 = *((int32*)(ref + 12));\n        x10 = *((int32*)(ref + 8));\n        x14 = *((int32*)(blk + 12));\n        x12 = *((int32*)(blk + 8));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        __asm\n        {\n            /****** process 8 pixels ******/\n            LDR     x11, [ref, #4];\n            LDR     x10, [ref], lx ;\n            LDR     x14, [blk, #4];\n            LDR     x12, [blk], #16 ;\n        }\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add with lower half word */\n\n        __asm\n        {\n            /****************/\n            RSBS    x11, dmin, x10, lsr 
#16;\n            ADDLSS  x8, x8, #0x10000001;\n            BLS     LOOP_SAD0;\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin, x8);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin, x8);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin, x8);\n    }\n\n\n#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n__asm__ volatile(\"rsbs  %1, %1, %2\\n\\trsbmi %1, %1, #0\\n\\tadd  %0, %0, %1\": \"=r\"(sad): \"r\"(tmp), \"r\"(tmp2));\n        return sad;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n__asm__ volatile(\"EOR  %1, %2, %0\\n\\tSUBS  %0, %2, %0\\n\\tEOR  %1, %1, %0\\n\\tAND  %1, %3, %1, lsr #1\\n\\tORRCC  %1, %1, #0x80000000\\n\\tRSB  %1, %1, %1, lsl #8\\n\\tADD  %0, %0, %1, asr #7\\n\\tEOR  %0, %0, %1, asr #7\": \"=r\"(src1), \"=&r\"(x7): \"r\"(src2), \"r\"(mask));\n\n        return src1;\n    }\n\n    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n__asm__ volatile(\"EOR  %1, %2, %0\\n\\tADDS  %0, %2, %0\\n\\tEOR  %1, %1, %0\\n\\tANDS  %1, %3, %1, rrx\\n\\tRSB  %1, %1, %1, lsl #8\\n\\tSUB  %0, %0, %1, asr #7\\n\\tEOR   %0, %0, %1, asr #7\": \"=r\"(src1), \"=&r\"(x7): \"r\"(src2), \"r\"(mask));\n\n        return src1;\n    }\n\n#define sum_accumulate  __asm__ volatile(\"SBC  %0, %0, %1\\n\\tBIC   %1, %4, %1\\n\\tADD   %2, %2, %1, lsr #8\\n\\tSBC   %0, %0, %3\\n\\tBIC   %3, %4, %3\\n\\tADD   %2, %2, %3, lsr #8\": \"=&r\" (x5), \"=&r\" (x10), \"=&r\" (x4), \"=&r\" (x11): \"r\" (x6));\n\n#define NUMBER 3\n#define SHIFT 24\n#define INC_X8 0x08000001\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#undef INC_X8\n#define INC_X8 0x10000001\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 1\n#undef 
SHIFT\n#define SHIFT 8\n#undef INC_X8\n#define INC_X8 0x08000001\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(uint8 *ref, uint8 *blk, int dmin, int lx)\n    {\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. */\n        x4 = x5 = 0;\n\n        x8 = (uint32)ref & 0x3;\n        if (x8 == 3)\n            goto SadMBOffset3;\n        if (x8 == 2)\n            goto SadMBOffset2;\n        if (x8 == 1)\n            goto SadMBOffset1;\n\n        x8 = 16;\n///\n__asm__ volatile(\"MVN  %0, #0xFF00\": \"=r\"(x6));\n\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x11 = *((int32*)(ref + 12));\n        x10 = *((int32*)(ref + 8));\n        x14 = *((int32*)(blk + 12));\n        x12 = *((int32*)(blk + 8));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****** process 8 pixels ******/\n        x11 = *((int32*)(ref + 4));\n__asm__ volatile(\"LDR  %0, [%1], %2\": \"=&r\"(x10), \"=r\"(ref): \"r\"(lx));\n        //x10 = *((int32*)ref); ref+=lx;\n        x14 = *((int32*)(blk + 4));\n__asm__ volatile(\"LDR  %0, [%1], #16\": \"=&r\"(x12), \"=r\"(blk));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 
8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add with lower half word */\n\n        /****************/\n\n        if (((uint32)x10 >> 16) <= dmin) /* compare with dmin */\n        {\n            if (--x8)\n            {\n                goto LOOP_SAD0;\n            }\n\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin);\n    }\n\n\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // _SAD_INLINE_H_\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/sad_mb_offset.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#if defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)\n#elif (NUMBER==2)\n__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)\n#endif\n{\n    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n    //  x5 = (x4<<8) - x4;\n    x4 = x5 = 0;\n    x6 = 0xFFFF00FF;\n    x9 = 0x80808080; /* const. 
*/\n    ref -= NUMBER; /* bic ref, ref, #3 */\n    ref -= lx;\n    blk -= 16;\n    x8 = 16;\n\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref += lx)); /* D C B A */\n    x11 = *((uint32*)(ref + 4));    /* H G F E */\n    x12 = *((uint32*)(ref + 8));    /* L K J I */\n\n    x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */\n    x10 = x10 | (x11 << (32 - SHIFT));        /* G F E D */\n    x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */\n    x11 = x11 | (x12 << (32 - SHIFT));        /* K J I H */\n\n    x12 = *((uint32*)(blk += 16));\n    x14 = *((uint32*)(blk + 4));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    x5 = x5 + x10; /* accumulate low bytes */\n    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */\n    x5 = x5 + x11;  /* accumulate low bytes */\n    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref + 8)); /* D C B A */\n    x11 = *((uint32*)(ref + 12));   /* H G F E */\n    x12 = *((uint32*)(ref + 16));   /* L K J I */\n\n    x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24  = 0xFF 0xFF 0xFF ~D */\n    x10 = x10 | (x11 << (32 - SHIFT));        /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */\n    x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */\n    x11 = x11 | (x12 << (32 - SHIFT));        /* ~K ~J ~I ~H */\n\n    x12 = *((uint32*)(blk + 8));\n    x14 = *((uint32*)(blk + 12));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    x5 = x5 + x10; /* accumulate low bytes */\n    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high 
bytes */\n    x5 = x5 + x11;  /* accumulate low bytes */\n    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    if ((int)((uint32)x10 >> 16) <= dmin) /* compare with dmin */\n    {\n        if (--x8)\n        {\n#if (NUMBER==3)\n            goto         LOOP_SAD3;\n#elif (NUMBER==2)\n            goto         LOOP_SAD2;\n#elif (NUMBER==1)\n            goto         LOOP_SAD1;\n#endif\n        }\n\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)\n#elif (NUMBER==2)\n__inline int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin, int32 x8)\n#endif\n{\n    int32 x4, x5, x6, x9, x10, x11, x12, x14;\n\n    x9 = 0x80808080; /* const. 
*/\n    x4 = x5 = 0;\n\n    __asm{\n        MVN      x6, #0xff0000;\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n        BIC      ref, ref, #3;\n    }\n    /****** process 8 pixels ******/\n    x11 = *((int32*)(ref + 12));\n    x12 = *((int32*)(ref + 16));\n    x10 = *((int32*)(ref + 8));\n    x14 = *((int32*)(blk + 12));\n\n    __asm{\n        MVN      x10, x10, lsr #SHIFT;\n        BIC      x10, x10, x11, lsl #(32-SHIFT);\n        MVN      x11, x11, lsr #SHIFT;\n        BIC      x11, x11, x12, lsl #(32-SHIFT);\n\n        LDR      x12, [blk, #8];\n    }\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    __asm{\n        /****** process 8 pixels ******/\n        LDR      x11, [ref, #4];\n        LDR      x12, [ref, #8];\n        LDR  x10, [ref], lx ;\n        LDR  x14, [blk, #4];\n\n        MVN      x10, x10, lsr #SHIFT;\n        BIC      x10, x10, x11, lsl #(32-SHIFT);\n        MVN      x11, x11, lsr #SHIFT;\n        BIC      x11, x11, x12, lsl #(32-SHIFT);\n\n        LDR      x12, [blk], #16;\n    }\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    __asm{\n        RSBS     x11, dmin, x10, lsr #16\n        ADDLSS   x8, x8, #INC_X8\n#if (NUMBER==3)\n        BLS      LOOP_SAD3;\n#elif (NUMBER==2)\nBLS      LOOP_SAD2;\n#elif (NUMBER==1)\nBLS      LOOP_SAD1;\n#endif\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#elif defined(__GNUC__) && defined(__arm__) /* ARM GNU COMPILER  */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(uint8 *ref, uint8 *blk, int lx, int dmin)\n#elif (NUMBER==2)\n__inline 
int32 sad_mb_offset2(uint8 *ref, uint8 *blk, int lx, int dmin)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(uint8 *ref, uint8 *blk, int lx, int dmin)\n#endif\n{\n    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n    x9 = 0x80808080; /* const. */\n    x4 = x5 = 0;\n    x8 = 16; //<<===========*******\n\n__asm__ volatile(\"MVN  %0, #0xFF0000\": \"=r\"(x6));\n\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n__asm__ volatile(\"BIC  %0, %0, #3\": \"=r\"(ref));\n    /****** process 8 pixels ******/\n    x11 = *((int32*)(ref + 12));\n    x12 = *((int32*)(ref + 16));\n    x10 = *((int32*)(ref + 8));\n    x14 = *((int32*)(blk + 12));\n\n#if (SHIFT==8)\n__asm__ volatile(\"MVN   %0, %0, lsr #8\\n\\tBIC   %0, %0, %1,lsl #24\\n\\tMVN   %1, %1,lsr #8\\n\\tBIC   %1, %1, %2,lsl #24\": \"=&r\"(x10), \"=&r\"(x11): \"r\"(x12));\n#elif (SHIFT==16)\n__asm__ volatile(\"MVN   %0, %0, lsr #16\\n\\tBIC   %0, %0, %1,lsl #16\\n\\tMVN   %1, %1,lsr #16\\n\\tBIC   %1, %1, %2,lsl #16\": \"=&r\"(x10), \"=&r\"(x11): \"r\"(x12));\n#elif (SHIFT==24)\n__asm__ volatile(\"MVN   %0, %0, lsr #24\\n\\tBIC   %0, %0, %1,lsl #8\\n\\tMVN   %1, %1,lsr #24\\n\\tBIC   %1, %1, %2,lsl #8\": \"=&r\"(x10), \"=&r\"(x11): \"r\"(x12));\n#endif\n\n    x12 = *((int32*)(blk + 8));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****** process 8 pixels ******/\n    x11 = *((int32*)(ref + 4));\n    x12 = *((int32*)(ref + 8));\n    x10 = *((int32*)ref); ref += lx;\n    x14 = *((int32*)(blk + 4));\n\n#if (SHIFT==8)\n__asm__ volatile(\"MVN   %0, %0, lsr #8\\n\\tBIC   %0, %0, %1,lsl #24\\n\\tMVN   %1, %1,lsr #8\\n\\tBIC   %1, %1, %2,lsl #24\": \"=&r\"(x10), \"=&r\"(x11): \"r\"(x12));\n#elif (SHIFT==16)\n__asm__ volatile(\"MVN   %0, %0, lsr #16\\n\\tBIC   %0, %0, %1,lsl #16\\n\\tMVN   %1, %1,lsr #16\\n\\tBIC   %1, %1, %2,lsl #16\": \"=&r\"(x10), 
\"=&r\"(x11): \"r\"(x12));\n#elif (SHIFT==24)\n__asm__ volatile(\"MVN   %0, %0, lsr #24\\n\\tBIC   %0, %0, %1,lsl #8\\n\\tMVN   %1, %1,lsr #24\\n\\tBIC   %1, %1, %2,lsl #8\": \"=&r\"(x10), \"=&r\"(x11): \"r\"(x12));\n#endif\n__asm__ volatile(\"LDR   %0, [%1], #16\": \"=&r\"(x12), \"=r\"(blk));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */\n    {\n        if (--x8)\n        {\n#if (NUMBER==3)\n            goto         LOOP_SAD3;\n#elif (NUMBER==2)\ngoto         LOOP_SAD2;\n#elif (NUMBER==1)\ngoto         LOOP_SAD1;\n#endif\n        }\n\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/slice.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_mem.h\"\n#include \"avcenc_lib.h\"\n\n\nAVCEnc_Status AVCEncodeSlice(AVCEncObject *encvid)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    AVCCommonObj *video = encvid->common;\n    AVCPicParamSet *pps = video->currPicParams;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    AVCMacroblock *currMB ;\n    AVCEncBitstream *stream = encvid->bitstream;\n    uint slice_group_id;\n    int CurrMbAddr, slice_type;\n\n    slice_type = video->slice_type;\n\n    /* set the first mb in slice */\n    video->mbNum = CurrMbAddr = sliceHdr->first_mb_in_slice;// * (1+video->MbaffFrameFlag);\n    slice_group_id = video->MbToSliceGroupMap[CurrMbAddr];\n\n    video->mb_skip_run = 0;\n\n    /* while loop , see subclause 7.3.4 */\n    while (1)\n    {\n        video->mbNum = CurrMbAddr;\n        currMB = video->currMB = &(video->mblock[CurrMbAddr]);\n        currMB->slice_id = video->slice_id;  // for deblocking\n\n        video->mb_x = CurrMbAddr % video->PicWidthInMbs;\n        video->mb_y = CurrMbAddr / video->PicWidthInMbs;\n\n        /* initialize QP for this MB here*/\n        /* calculate currMB->QPy */\n        RCInitMBQP(encvid);\n\n        /* check the availability of neighboring 
macroblocks */\n        InitNeighborAvailability(video, CurrMbAddr);\n\n        /* Assuming that InitNeighborAvailability has been called prior to this function */\n        video->intraAvailA = video->intraAvailB = video->intraAvailC = video->intraAvailD = 0;\n        /* this is necessary for all subsequent intra search */\n\n        if (!video->currPicParams->constrained_intra_pred_flag)\n        {\n            video->intraAvailA = video->mbAvailA;\n            video->intraAvailB = video->mbAvailB;\n            video->intraAvailC = video->mbAvailC;\n            video->intraAvailD = video->mbAvailD;\n        }\n        else\n        {\n            if (video->mbAvailA)\n            {\n                video->intraAvailA = video->mblock[video->mbAddrA].mb_intra;\n            }\n            if (video->mbAvailB)\n            {\n                video->intraAvailB = video->mblock[video->mbAddrB].mb_intra ;\n            }\n            if (video->mbAvailC)\n            {\n                video->intraAvailC = video->mblock[video->mbAddrC].mb_intra;\n            }\n            if (video->mbAvailD)\n            {\n                video->intraAvailD = video->mblock[video->mbAddrD].mb_intra;\n            }\n        }\n\n        /* encode_one_macroblock() */\n        status = EncodeMB(encvid);\n        if (status != AVCENC_SUCCESS)\n        {\n            break;\n        }\n\n        /* go to next MB */\n        CurrMbAddr++;\n\n        while ((uint)video->MbToSliceGroupMap[CurrMbAddr] != slice_group_id &&\n                (uint)CurrMbAddr < video->PicSizeInMbs)\n        {\n            CurrMbAddr++;\n        }\n\n        if ((uint)CurrMbAddr >= video->PicSizeInMbs)\n        {\n            /* end of slice, return, but before that check to see if there are other slices\n            to be encoded. 
*/\n            encvid->currSliceGroup++;\n            if (encvid->currSliceGroup > (int)pps->num_slice_groups_minus1) /* no more slice group */\n            {\n                status = AVCENC_PICTURE_READY;\n                break;\n            }\n            else\n            {\n                /* find first_mb_num for the next slice */\n                CurrMbAddr = 0;\n                while (video->MbToSliceGroupMap[CurrMbAddr] != encvid->currSliceGroup &&\n                        (uint)CurrMbAddr < video->PicSizeInMbs)\n                {\n                    CurrMbAddr++;\n                }\n                if ((uint)CurrMbAddr >= video->PicSizeInMbs)\n                {\n                    status = AVCENC_SLICE_EMPTY; /* error, one slice group has no MBs in it */\n                }\n\n                video->mbNum = CurrMbAddr;\n                status = AVCENC_SUCCESS;\n                break;\n            }\n        }\n    }\n\n    if (video->mb_skip_run > 0)\n    {\n        /* write skip_run */\n        if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE)\n        {\n            ue_v(stream, video->mb_skip_run);\n            video->mb_skip_run = 0;\n        }\n        else    /* shouldn't happen */\n        {\n            status = AVCENC_FAIL;\n        }\n    }\n\n    return status;\n}\n\n\nAVCEnc_Status EncodeMB(AVCEncObject *encvid)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    AVCCommonObj *video = encvid->common;\n    AVCPictureData *currPic = video->currPic;\n    AVCFrameIO  *currInput = encvid->currInput;\n    AVCMacroblock *currMB = video->currMB;\n    AVCMacroblock *MB_A, *MB_B;\n    AVCEncBitstream *stream = encvid->bitstream;\n    AVCRateControl *rateCtrl = encvid->rateCtrl;\n    uint8 *cur, *curL, *curCb, *curCr;\n    uint8 *orgL, *orgCb, *orgCr, *org4;\n    int CurrMbAddr = video->mbNum;\n    int picPitch = currPic->pitch;\n    int orgPitch = currInput->pitch;\n    int x_position = (video->mb_x << 4);\n    int y_position = (video->mb_y 
<< 4);\n    int offset;\n    int b8, b4, blkidx;\n    AVCResidualType  resType;\n    int slice_type;\n    int numcoeff; /* output from residual_block_cavlc */\n    int cost16, cost8;\n\n    int num_bits, start_mb_bits, start_text_bits;\n\n    slice_type = video->slice_type;\n\n    /* now, point to the reconstructed frame */\n    offset = y_position * picPitch + x_position;\n    curL = currPic->Sl + offset;\n    orgL = currInput->YCbCr[0] + offset;\n    offset = (offset + x_position) >> 2;\n    curCb = currPic->Scb + offset;\n    curCr = currPic->Scr + offset;\n    orgCb = currInput->YCbCr[1] + offset;\n    orgCr = currInput->YCbCr[2] + offset;\n\n    if (orgPitch != picPitch)\n    {\n        offset = y_position * (orgPitch - picPitch);\n        orgL += offset;\n        offset >>= 2;\n        orgCb += offset;\n        orgCr += offset;\n    }\n\n    /******* determine MB prediction mode *******/\n    if (encvid->intraSearch[CurrMbAddr])\n    {\n        MBIntraSearch(encvid, CurrMbAddr, curL, picPitch);\n    }\n    /******* This part should be determined somehow ***************/\n    if (currMB->mbMode == AVC_I_PCM)\n    {\n        /* write down mb_type and PCM data */\n        /* and copy from currInput to currPic */\n        status = EncodeIntraPCM(encvid);\n\n\n        return status;\n    }\n\n    /****** for intra prediction, pred is already done *******/\n    /****** for I4, the recon is ready and Xfrm coefs are ready to be encoded *****/\n\n    //RCCalculateMAD(encvid,currMB,orgL,orgPitch); // no need to re-calculate MAD for Intra\n    // not used since totalSAD is used instead\n\n    /* compute the prediction */\n    /* output is video->pred_block */\n    if (!currMB->mb_intra)\n    {\n        AVCMBMotionComp(encvid, video); /* perform prediction and residue calculation */\n        /* we can do the loop here and call dct_luma */\n        video->pred_pitch = picPitch;\n        currMB->CBP = 0;\n        cost16 = 0;\n        cur = curL;\n        org4 = orgL;\n\n   
     for (b8 = 0; b8 < 4; b8++)\n        {\n            cost8 = 0;\n\n            for (b4 = 0; b4 < 4; b4++)\n            {\n                blkidx = blkIdx2blkXY[b8][b4];\n                video->pred_block = cur;\n                numcoeff = dct_luma(encvid, blkidx, cur, org4, &cost8);\n                currMB->nz_coeff[blkidx] = numcoeff;\n                if (numcoeff)\n                {\n                    video->cbp4x4 |= (1 << blkidx);\n                    currMB->CBP |= (1 << b8);\n                }\n\n                if (b4&1)\n                {\n                    cur += ((picPitch << 2) - 4);\n                    org4 += ((orgPitch << 2) - 4);\n                }\n                else\n                {\n                    cur += 4;\n                    org4 += 4;\n                }\n            }\n\n            /* move the IDCT part out of dct_luma to accommodate the check\n               for coeff_cost. */\n\n            if ((currMB->CBP&(1 << b8)) && (cost8 <= _LUMA_COEFF_COST_))\n            {\n                cost8 = 0; // reset it\n\n                currMB->CBP ^= (1 << b8);\n                blkidx = blkIdx2blkXY[b8][0];\n\n                currMB->nz_coeff[blkidx] = 0;\n                currMB->nz_coeff[blkidx+1] = 0;\n                currMB->nz_coeff[blkidx+4] = 0;\n                currMB->nz_coeff[blkidx+5] = 0;\n            }\n\n            cost16 += cost8;\n\n            if (b8&1)\n            {\n                cur -= 8;\n                org4 -= 8;\n            }\n            else\n            {\n                cur += (8 - (picPitch << 3));\n                org4 += (8 - (orgPitch << 3));\n            }\n        }\n\n        /* after the whole MB, we do another check for coeff_cost */\n        if ((currMB->CBP&0xF) && (cost16 <= _LUMA_MB_COEFF_COST_))\n        {\n            currMB->CBP = 0;  // reset it to zero\n            oscl_memset(currMB->nz_coeff, 0, sizeof(uint8)*16);\n        }\n\n        // now we do IDCT\n        MBInterIdct(video, 
curL, currMB, picPitch);\n\n//      video->pred_block = video->pred + 256;\n    }\n    else    /* Intra prediction */\n    {\n        encvid->numIntraMB++;\n\n        if (currMB->mbMode == AVC_I16) /* do prediction for the whole macroblock */\n        {\n            currMB->CBP = 0;\n            /* get the prediction from encvid->pred_i16 */\n            dct_luma_16x16(encvid, curL, orgL);\n        }\n        video->pred_block = encvid->pred_ic[currMB->intra_chroma_pred_mode];\n    }\n\n    /* chrominance */\n    /* not need to do anything, the result is in encvid->pred_ic\n    chroma dct must be aware that prediction block can come from either intra or inter. */\n\n    dct_chroma(encvid, curCb, orgCb, 0);\n\n    dct_chroma(encvid, curCr, orgCr, 1);\n\n\n    /* 4.1 if there's nothing in there, video->mb_skip_run++ */\n    /* 4.2 if coded, check if there is a run of skipped MB, encodes it,\n            set video->QPyprev = currMB->QPy; */\n\n    /* 5. vlc encode */\n\n    /* check for skipped macroblock, INTER only */\n    if (!currMB->mb_intra)\n    {\n        /* decide whether this MB (for inter MB) should be skipped if there's nothing left. 
*/\n        if (!currMB->CBP && currMB->NumMbPart == 1 && currMB->QPy == video->QPy)\n        {\n            if (currMB->MBPartPredMode[0][0] == AVC_Pred_L0 && currMB->ref_idx_L0[0] == 0)\n            {\n                MB_A = &video->mblock[video->mbAddrA];\n                MB_B = &video->mblock[video->mbAddrB];\n\n                if (!video->mbAvailA || !video->mbAvailB)\n                {\n                    if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/\n                    {\n                        currMB->mbMode = AVC_SKIP;\n                        video->mvd_l0[0][0][0] = 0;\n                        video->mvd_l0[0][0][1] = 0;\n                    }\n                }\n                else\n                {\n                    if ((MB_A->ref_idx_L0[1] == 0 && MB_A->mvL0[3] == 0) ||\n                            (MB_B->ref_idx_L0[2] == 0 && MB_B->mvL0[12] == 0))\n                    {\n                        if (currMB->mvL0[0] == 0) /* both mv components are zeros.*/\n                        {\n                            currMB->mbMode = AVC_SKIP;\n                            video->mvd_l0[0][0][0] = 0;\n                            video->mvd_l0[0][0][1] = 0;\n                        }\n                    }\n                    else if (video->mvd_l0[0][0][0] == 0 && video->mvd_l0[0][0][1] == 0)\n                    {\n                        currMB->mbMode = AVC_SKIP;\n                    }\n                }\n            }\n\n            if (currMB->mbMode == AVC_SKIP)\n            {\n                video->mb_skip_run++;\n\n                /* set parameters */\n                /* not sure whether we need the followings */\n                if (slice_type == AVC_P_SLICE)\n                {\n                    currMB->mbMode = AVC_SKIP;\n                    currMB->MbPartWidth = currMB->MbPartHeight = 16;\n                    currMB->MBPartPredMode[0][0] = AVC_Pred_L0;\n                    currMB->NumMbPart = 1;\n                    
currMB->NumSubMbPart[0] = currMB->NumSubMbPart[1] =\n                                                  currMB->NumSubMbPart[2] = currMB->NumSubMbPart[3] = 1;\n                    currMB->SubMbPartWidth[0] = currMB->SubMbPartWidth[1] =\n                                                    currMB->SubMbPartWidth[2] = currMB->SubMbPartWidth[3] = currMB->MbPartWidth;\n                    currMB->SubMbPartHeight[0] = currMB->SubMbPartHeight[1] =\n                                                     currMB->SubMbPartHeight[2] = currMB->SubMbPartHeight[3] = currMB->MbPartHeight;\n\n                }\n                else if (slice_type == AVC_B_SLICE)\n                {\n                    currMB->mbMode = AVC_SKIP;\n                    currMB->MbPartWidth = currMB->MbPartHeight = 8;\n                    currMB->MBPartPredMode[0][0] = AVC_Direct;\n                    currMB->NumMbPart = -1;\n                }\n\n                /* for skipped MB, always look at the first entry in RefPicList */\n                currMB->RefIdx[0] = currMB->RefIdx[1] =\n                                        currMB->RefIdx[2] = currMB->RefIdx[3] = video->RefPicList0[0]->RefIdx;\n\n                /* do not return yet, need to do some copies */\n            }\n        }\n    }\n    /* non-skipped MB */\n\n\n    /************* START ENTROPY CODING *************************/\n\n    start_mb_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;\n\n    /* encode mb_type, mb_pred, sub_mb_pred, CBP */\n    if (slice_type != AVC_I_SLICE && slice_type != AVC_SI_SLICE && currMB->mbMode != AVC_SKIP)\n    {\n        //if(!pps->entropy_coding_mode_flag)  ALWAYS true\n        {\n            ue_v(stream, video->mb_skip_run);\n            video->mb_skip_run = 0;\n        }\n    }\n\n    if (currMB->mbMode != AVC_SKIP)\n    {\n        status = EncodeMBHeader(currMB, encvid);\n        if (status != AVCENC_SUCCESS)\n        {\n            return status;\n        }\n    }\n\n    
start_text_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;\n\n    /**** now decoding part *******/\n    resType = AVC_Luma;\n\n    /* DC transform for luma I16 mode */\n    if (currMB->mbMode == AVC_I16)\n    {\n        /* vlc encode level/run */\n        status = enc_residual_block(encvid, AVC_Intra16DC, encvid->numcoefdc, currMB);\n        if (status != AVCENC_SUCCESS)\n        {\n            return status;\n        }\n        resType = AVC_Intra16AC;\n    }\n\n    /* VLC encoding for luma */\n    for (b8 = 0; b8 < 4; b8++)\n    {\n        if (currMB->CBP&(1 << b8))\n        {\n            for (b4 = 0; b4 < 4; b4++)\n            {\n                /* vlc encode level/run */\n                status = enc_residual_block(encvid, resType, (b8 << 2) + b4, currMB);\n                if (status != AVCENC_SUCCESS)\n                {\n                    return status;\n                }\n            }\n        }\n    }\n\n    /* chroma */\n    if (currMB->CBP & (3 << 4)) /* chroma DC residual present */\n    {\n        for (b8 = 0; b8 < 2; b8++) /* for iCbCr */\n        {\n            /* vlc encode level/run */\n            status = enc_residual_block(encvid, AVC_ChromaDC, encvid->numcoefcdc[b8] + (b8 << 3), currMB);\n            if (status != AVCENC_SUCCESS)\n            {\n                return status;\n            }\n        }\n    }\n\n    if (currMB->CBP & (2 << 4))\n    {\n        /* AC part */\n        for (b8 = 0; b8 < 2; b8++) /* for iCbCr */\n        {\n            for (b4 = 0; b4 < 4; b4++)  /* for each block inside Cb or Cr */\n            {\n                /* vlc encode level/run */\n                status = enc_residual_block(encvid, AVC_ChromaAC, 16 + (b8 << 2) + b4, currMB);\n                if (status != AVCENC_SUCCESS)\n                {\n                    return status;\n                }\n            }\n        }\n    }\n\n\n    num_bits = 32 + (encvid->bitstream->write_pos << 3) - encvid->bitstream->bit_left;\n\n    
RCPostMB(video, rateCtrl, start_text_bits - start_mb_bits,\n             num_bits - start_text_bits);\n\n//  num_bits -= start_mb_bits;\n//  fprintf(fdebug,\"MB #%d: %d bits\\n\",CurrMbAddr,num_bits);\n//  fclose(fdebug);\n    return status;\n}\n\n/* copy the content from predBlock back to the reconstructed YUV frame */\nvoid Copy_MB(uint8 *curL, uint8 *curCb, uint8 *curCr, uint8 *predBlock, int picPitch)\n{\n    int j, offset;\n    uint32 *dst, *dst2, *src;\n\n    dst = (uint32*)curL;\n    src = (uint32*)predBlock;\n\n    offset = (picPitch - 16) >> 2;\n\n    for (j = 0; j < 16; j++)\n    {\n        *dst++ = *src++;\n        *dst++ = *src++;\n        *dst++ = *src++;\n        *dst++ = *src++;\n\n        dst += offset;\n    }\n\n    dst = (uint32*)curCb;\n    dst2 = (uint32*)curCr;\n    offset >>= 1;\n\n    for (j = 0; j < 8; j++)\n    {\n        *dst++ = *src++;\n        *dst++ = *src++;\n        *dst2++ = *src++;\n        *dst2++ = *src++;\n\n        dst += offset;\n        dst2 += offset;\n    }\n    return ;\n}\n\n/* encode mb_type, mb_pred, sub_mb_pred, CBP */\n/* decide whether this MB (for inter MB) should be skipped */\nAVCEnc_Status EncodeMBHeader(AVCMacroblock *currMB, AVCEncObject *encvid)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    uint mb_type;\n    AVCCommonObj *video = encvid->common;\n    AVCEncBitstream *stream = encvid->bitstream;\n\n    if (currMB->CBP > 47)   /* chroma CBP is 11 */\n    {\n        currMB->CBP -= 16;  /* remove the 5th bit from the right */\n    }\n\n    mb_type = InterpretMBType(currMB, video->slice_type);\n\n    status = ue_v(stream, mb_type);\n\n    if (currMB->mbMode == AVC_P8 || currMB->mbMode == AVC_P8ref0)\n    {\n        status = sub_mb_pred(video, currMB, stream);\n    }\n    else\n    {\n        status = mb_pred(video, currMB, stream) ;\n    }\n\n    if (currMB->mbMode != AVC_I16)\n    {\n        /* decode coded_block_pattern */\n        status = EncodeCBP(currMB, stream);\n    }\n\n    /* calculate 
currMB->mb_qp_delta = currMB->QPy - video->QPyprev */\n    if (currMB->CBP > 0 || currMB->mbMode == AVC_I16)\n    {\n        status = se_v(stream, currMB->QPy - video->QPy);\n        video->QPy = currMB->QPy; /* = (video->QPyprev + currMB->mb_qp_delta + 52)%52; */\n        // no need video->QPc = currMB->QPc;\n    }\n    else\n    {\n        if (currMB->QPy != video->QPy) // current QP is not the same as previous QP\n        {\n            /* restore these values */\n            RCRestoreQP(currMB, video, encvid);\n        }\n    }\n\n    return status;\n}\n\n\n/* inputs are mbMode, mb_intra, i16Mode, CBP, NumMbPart, MbPartWidth, MbPartHeight */\nuint InterpretMBType(AVCMacroblock *currMB, int slice_type)\n{\n    int CBP_chrom;\n    int mb_type;// part1, part2, part3;\n//  const static int MapParts2Type[2][3][3]={{{4,8,12},{10,6,14},{16,18,20}},\n//  {{5,9,13},{11,7,15},{17,19,21}}};\n\n    if (currMB->mb_intra)\n    {\n        if (currMB->mbMode == AVC_I4)\n        {\n            mb_type = 0;\n        }\n        else if (currMB->mbMode == AVC_I16)\n        {\n            CBP_chrom = (currMB->CBP & 0x30);\n            if (currMB->CBP&0xF)\n            {\n                currMB->CBP |= 0xF;  /* either 0x0 or 0xF */\n                mb_type = 13;\n            }\n            else\n            {\n                mb_type = 1;\n            }\n            mb_type += (CBP_chrom >> 2) + currMB->i16Mode;\n        }\n        else /* if(currMB->mbMode == AVC_I_PCM) */\n        {\n            mb_type = 25;\n        }\n    }\n    else\n    {  /* P-MB *//* note that the order of the enum AVCMBMode cannot be changed\n        since we use it here. 
*/\n        mb_type = currMB->mbMode - AVC_P16;\n    }\n\n    if (slice_type == AVC_P_SLICE)\n    {\n        if (currMB->mb_intra)\n        {\n            mb_type += 5;\n        }\n    }\n    // following codes have not been tested yet, not needed.\n    /*  else if(slice_type == AVC_B_SLICE)\n        {\n            if(currMB->mbMode == AVC_BDirect16)\n            {\n                mb_type = 0;\n            }\n            else if(currMB->mbMode == AVC_P16)\n            {\n                mb_type = currMB->MBPartPredMode[0][0] + 1; // 1 or 2\n            }\n            else if(currMB->mbMode == AVC_P8)\n            {\n                mb_type = 26;\n            }\n            else if(currMB->mbMode == AVC_P8ref0)\n            {\n                mb_type = 27;\n            }\n            else\n            {\n                part1 = currMB->mbMode - AVC_P16x8;\n                part2 = currMB->MBPartPredMode[0][0];\n                part3 = currMB->MBPartPredMode[1][0];\n                mb_type = MapParts2Type[part1][part2][part3];\n            }\n        }\n\n        if(slice_type == AVC_SI_SLICE)\n        {\n            mb_type++;\n        }\n    */\n    return (uint)mb_type;\n}\n\n//const static int mbPart2raster[3][4] = {{0,0,0,0},{1,1,0,0},{1,0,1,0}};\n\n/* see subclause 7.3.5.1 */\nAVCEnc_Status mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    int mbPartIdx;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    int max_ref_idx;\n    uint code;\n\n    if (currMB->mbMode == AVC_I4 || currMB->mbMode == AVC_I16)\n    {\n        if (currMB->mbMode == AVC_I4)\n        {\n            /* perform prediction to get the actual intra 4x4 pred mode */\n            EncodeIntra4x4Mode(video, currMB, stream);\n            /* output will be in currMB->i4Mode[4][4] */\n        }\n\n        /* assume already set from MBPrediction() */\n        status = ue_v(stream, currMB->intra_chroma_pred_mode);\n    }\n 
   else if (currMB->MBPartPredMode[0][0] != AVC_Direct)\n    {\n\n        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);\n\n        /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n        max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;\n        /*      if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)\n                    max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1;\n        */\n        /* decode ref index for L0 */\n        if (sliceHdr->num_ref_idx_l0_active_minus1 > 0)\n        {\n            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n            {\n                if (/*(sliceHdr->num_ref_idx_l0_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/\n                    currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)\n                {\n                    code = currMB->ref_idx_L0[mbPartIdx];\n                    status = te_v(stream, code, max_ref_idx);\n                }\n            }\n        }\n\n        /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n        max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;\n        /*      if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)\n                    max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;\n        */\n        /* decode ref index for L1 */\n        if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)\n        {\n            for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n            {\n                if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/\n                    currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)\n                {\n                    status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);\n                }\n            }\n        }\n\n        /* encode mvd_l0 */\n        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n        {\n            if (currMB->MBPartPredMode[mbPartIdx][0] != 
AVC_Pred_L1)\n            {\n                status = se_v(stream, video->mvd_l0[mbPartIdx][0][0]);\n                status = se_v(stream, video->mvd_l0[mbPartIdx][0][1]);\n            }\n        }\n        /* encode mvd_l1 */\n        for (mbPartIdx = 0; mbPartIdx < currMB->NumMbPart; mbPartIdx++)\n        {\n            if (currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)\n            {\n                status = se_v(stream, video->mvd_l1[mbPartIdx][0][0]);\n                status = se_v(stream, video->mvd_l1[mbPartIdx][0][1]);\n            }\n        }\n    }\n\n    return status;\n}\n\n/* see subclause 7.3.5.2 */\nAVCEnc_Status sub_mb_pred(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)\n{\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    int mbPartIdx, subMbPartIdx;\n    AVCSliceHeader *sliceHdr = video->sliceHdr;\n    uint max_ref_idx;\n    uint slice_type = video->slice_type;\n    uint sub_mb_type[4];\n\n    /* this should move somewhere else where we don't have to make this check */\n    if (currMB->mbMode == AVC_P8ref0)\n    {\n        oscl_memset(currMB->ref_idx_L0, 0, sizeof(int16)*4);\n    }\n\n    /* we have to check the values to make sure they are valid  */\n    /* assign values to currMB->sub_mb_type[] */\n    if (slice_type == AVC_P_SLICE)\n    {\n        InterpretSubMBTypeP(currMB, sub_mb_type);\n    }\n    /* no need to check for B-slice\n        else if(slice_type == AVC_B_SLICE)\n        {\n            InterpretSubMBTypeB(currMB,sub_mb_type);\n        }*/\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        status = ue_v(stream, sub_mb_type[mbPartIdx]);\n    }\n\n    /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n    max_ref_idx = sliceHdr->num_ref_idx_l0_active_minus1;\n    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)\n            max_ref_idx = 2*sliceHdr->num_ref_idx_l0_active_minus1 + 1; */\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        if 
((sliceHdr->num_ref_idx_l0_active_minus1 > 0 /*|| currMB->mb_field_decoding_flag*/) &&\n                currMB->mbMode != AVC_P8ref0 && /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/\n                currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)\n        {\n            status = te_v(stream, currMB->ref_idx_L0[mbPartIdx], max_ref_idx);\n        }\n        /* used in deblocking */\n        currMB->RefIdx[mbPartIdx] = video->RefPicList0[currMB->ref_idx_L0[mbPartIdx]]->RefIdx;\n    }\n    /* see subclause 7.4.5.1 for the range of ref_idx_lX */\n    max_ref_idx = sliceHdr->num_ref_idx_l1_active_minus1;\n    /*  if(video->MbaffFrameFlag && currMB->mb_field_decoding_flag)\n            max_ref_idx = 2*sliceHdr->num_ref_idx_l1_active_minus1 + 1;*/\n\n    if (sliceHdr->num_ref_idx_l1_active_minus1 > 0)\n    {\n        for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n        {\n            if (/*(sliceHdr->num_ref_idx_l1_active_minus1>0 || currMB->mb_field_decoding_flag) &&*/\n                /*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/\n                currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)\n            {\n                status = te_v(stream, currMB->ref_idx_L1[mbPartIdx], max_ref_idx);\n            }\n        }\n    }\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/\n            currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L1)\n        {\n            for (subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n            {\n                status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][0]);\n                status = se_v(stream, video->mvd_l0[mbPartIdx][subMbPartIdx][1]);\n            }\n        }\n    }\n\n    for (mbPartIdx = 0; mbPartIdx < 4; mbPartIdx++)\n    {\n        if (/*currMB->subMbMode[mbPartIdx]!=AVC_BDirect8 &&*/\n            currMB->MBPartPredMode[mbPartIdx][0] != AVC_Pred_L0)\n        {\n            for 
(subMbPartIdx = 0; subMbPartIdx < currMB->NumSubMbPart[mbPartIdx]; subMbPartIdx++)\n            {\n                status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][0]);\n                status = se_v(stream, video->mvd_l1[mbPartIdx][subMbPartIdx][1]);\n            }\n        }\n    }\n\n    return status;\n}\n\n/* input is mblock->sub_mb_type[] */\nvoid InterpretSubMBTypeP(AVCMacroblock *mblock, uint *sub_mb_type)\n{\n    int i;\n    /* see enum AVCMBType declaration */\n    /*const static AVCSubMBMode map2subMbMode[4] = {AVC_8x8,AVC_8x4,AVC_4x8,AVC_4x4};\n    const static int map2subPartWidth[4] = {8,8,4,4};\n    const static int map2subPartHeight[4] = {8,4,8,4};\n    const static int map2numSubPart[4] = {1,2,2,4};*/\n\n    for (i = 0; i < 4 ; i++)\n    {\n        sub_mb_type[i] = mblock->subMbMode[i] - AVC_8x8;\n    }\n\n    return ;\n}\n\nvoid InterpretSubMBTypeB(AVCMacroblock *mblock, uint *sub_mb_type)\n{\n    int i;\n    /* see enum AVCMBType declaration */\n    /*  const static AVCSubMBMode map2subMbMode[13] = {AVC_BDirect8,AVC_8x8,AVC_8x8,\n            AVC_8x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_8x4,AVC_4x8,AVC_4x4,AVC_4x4,AVC_4x4};\n        const static int map2subPartWidth[13] = {4,8,8,8,8,4,8,4,8,4,4,4,4};\n        const static int map2subPartHeight[13] = {4,8,8,8,4,8,4,8,4,8,4,4,4};\n        const static int map2numSubPart[13] = {4,1,1,1,2,2,2,2,2,2,4,4,4};\n        const static int map2predMode[13] = {3,0,1,2,0,0,1,1,2,2,0,1,2};*/\n\n    for (i = 0; i < 4 ; i++)\n    {\n        if (mblock->subMbMode[i] == AVC_BDirect8)\n        {\n            sub_mb_type[i] = 0;\n        }\n        else if (mblock->subMbMode[i] == AVC_8x8)\n        {\n            sub_mb_type[i] = 1 + mblock->MBPartPredMode[i][0];\n        }\n        else if (mblock->subMbMode[i] == AVC_4x4)\n        {\n            sub_mb_type[i] = 10 + mblock->MBPartPredMode[i][0];\n        }\n        else\n        {\n            sub_mb_type[i] = 4 + (mblock->MBPartPredMode[i][0] << 1) + 
(mblock->subMbMode[i] - AVC_8x4);\n        }\n    }\n\n    return ;\n}\n\n/* see subclause 8.3.1 */\nAVCEnc_Status EncodeIntra4x4Mode(AVCCommonObj *video, AVCMacroblock *currMB, AVCEncBitstream *stream)\n{\n    int intra4x4PredModeA = 0;\n    int intra4x4PredModeB, predIntra4x4PredMode;\n    int component, SubBlock_indx, block_x, block_y;\n    int dcOnlyPredictionFlag;\n    uint    flag;\n    int     rem = 0;\n    int     mode;\n    int bindx = 0;\n\n    for (component = 0; component < 4; component++) /* partition index */\n    {\n        block_x = ((component & 1) << 1);\n        block_y = ((component >> 1) << 1);\n\n        for (SubBlock_indx = 0; SubBlock_indx < 4; SubBlock_indx++) /* sub-partition index */\n        {\n            dcOnlyPredictionFlag = 0;\n            if (block_x > 0)\n            {\n                intra4x4PredModeA = currMB->i4Mode[(block_y << 2) + block_x - 1 ];\n            }\n            else\n            {\n                if (video->intraAvailA)\n                {\n                    if (video->mblock[video->mbAddrA].mbMode == AVC_I4)\n                    {\n                        intra4x4PredModeA = video->mblock[video->mbAddrA].i4Mode[(block_y << 2) + 3];\n                    }\n                    else\n                    {\n                        intra4x4PredModeA = AVC_I4_DC;\n                    }\n                }\n                else\n                {\n                    dcOnlyPredictionFlag = 1;\n                }\n            }\n\n            if (block_y > 0)\n            {\n                intra4x4PredModeB = currMB->i4Mode[((block_y-1) << 2) + block_x];\n            }\n            else\n            {\n                if (video->intraAvailB)\n                {\n                    if (video->mblock[video->mbAddrB].mbMode == AVC_I4)\n                    {\n                        intra4x4PredModeB = video->mblock[video->mbAddrB].i4Mode[(3 << 2) + block_x];\n                    }\n                    else\n               
     {\n                        intra4x4PredModeB = AVC_I4_DC;\n                    }\n                }\n                else\n                {\n                    dcOnlyPredictionFlag = 1;\n                }\n            }\n\n            if (dcOnlyPredictionFlag)\n            {\n                intra4x4PredModeA = intra4x4PredModeB = AVC_I4_DC;\n            }\n\n            predIntra4x4PredMode = AVC_MIN(intra4x4PredModeA, intra4x4PredModeB);\n\n            flag = 0;\n            mode = currMB->i4Mode[(block_y<<2)+block_x];\n\n            if (mode == (AVCIntra4x4PredMode)predIntra4x4PredMode)\n            {\n                flag = 1;\n            }\n            else if (mode < predIntra4x4PredMode)\n            {\n                rem = mode;\n            }\n            else\n            {\n                rem = mode - 1;\n            }\n\n            BitstreamWrite1Bit(stream, flag);\n\n            if (!flag)\n            {\n                BitstreamWriteBits(stream, 3, rem);\n            }\n\n            bindx++;\n            block_y += (SubBlock_indx & 1) ;\n            block_x += (1 - 2 * (SubBlock_indx & 1)) ;\n        }\n    }\n\n    return AVCENC_SUCCESS;\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/enc/src/vlc_encode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"avcenc_lib.h\"\n\n/**\nSee algorithm in subclause 9.1, Table 9-1, Table 9-2. */\nAVCEnc_Status ue_v(AVCEncBitstream *bitstream, uint codeNum)\n{\n    if (AVCENC_SUCCESS != SetEGBitstring(bitstream, codeNum))\n        return AVCENC_FAIL;\n\n    return AVCENC_SUCCESS;\n}\n\n/**\nSee subclause 9.1.1, Table 9-3 */\nAVCEnc_Status  se_v(AVCEncBitstream *bitstream, int value)\n{\n    uint codeNum;\n    AVCEnc_Status status;\n\n    if (value <= 0)\n    {\n        codeNum = -value * 2;\n    }\n    else\n    {\n        codeNum = value * 2 - 1;\n    }\n\n    status = ue_v(bitstream, codeNum);\n\n    return status;\n}\n\nAVCEnc_Status te_v(AVCEncBitstream *bitstream, uint value, uint range)\n{\n    AVCEnc_Status status;\n\n    if (range > 1)\n    {\n        return ue_v(bitstream, value);\n    }\n    else\n    {\n        status = BitstreamWrite1Bit(bitstream, 1 - value);\n        return status;\n    }\n}\n\n/**\nSee subclause 9.1, Table 9-1, 9-2. 
*/\n// compute leadingZeros and inforbits\n//codeNum = (1<<leadingZeros)-1+infobits;\nAVCEnc_Status SetEGBitstring(AVCEncBitstream *bitstream, uint codeNum)\n{\n    AVCEnc_Status status;\n    int leadingZeros;\n    int infobits;\n\n    if (!codeNum)\n    {\n        status = BitstreamWrite1Bit(bitstream, 1);\n        return status;\n    }\n\n    /* calculate leadingZeros and infobits */\n    leadingZeros = 1;\n    while ((uint)(1 << leadingZeros) < codeNum + 2)\n    {\n        leadingZeros++;\n    }\n    leadingZeros--;\n    infobits = codeNum - (1 << leadingZeros) + 1;\n\n    status = BitstreamWriteBits(bitstream, leadingZeros, 0);\n    infobits |= (1 << leadingZeros);\n    status = BitstreamWriteBits(bitstream, leadingZeros + 1, infobits);\n    return status;\n}\n\n/* see Table 9-4 assignment of codeNum to values of coded_block_pattern. */\nconst static uint8 MapCBP2code[48][2] =\n{\n    {3, 0}, {29, 2}, {30, 3}, {17, 7}, {31, 4}, {18, 8}, {37, 17}, {8, 13}, {32, 5}, {38, 18}, {19, 9}, {9, 14},\n    {20, 10}, {10, 15}, {11, 16}, {2, 11}, {16, 1}, {33, 32}, {34, 33}, {21, 36}, {35, 34}, {22, 37}, {39, 44}, {4, 40},\n    {36, 35}, {40, 45}, {23, 38}, {5, 41}, {24, 39}, {6, 42}, {7, 43}, {1, 19}, {41, 6}, {42, 24}, {43, 25}, {25, 20},\n    {44, 26}, {26, 21}, {46, 46}, {12, 28}, {45, 27}, {47, 47}, {27, 22}, {13, 29}, {28, 23}, {14, 30}, {15, 31}, {0, 12}\n};\n\nAVCEnc_Status EncodeCBP(AVCMacroblock *currMB, AVCEncBitstream *stream)\n{\n    AVCEnc_Status status;\n    uint codeNum;\n\n    if (currMB->mbMode == AVC_I4)\n    {\n        codeNum = MapCBP2code[currMB->CBP][0];\n    }\n    else\n    {\n        codeNum = MapCBP2code[currMB->CBP][1];\n    }\n\n    status = ue_v(stream, codeNum);\n\n    return status;\n}\n\nAVCEnc_Status ce_TotalCoeffTrailingOnes(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff, int nC)\n{\n    const static uint8 totCoeffTrailOne[3][4][17][2] =\n    {\n        {   // 0702\n            {{1, 1}, {6, 5}, {8, 7}, {9, 7}, {10, 7}, {11, 7}, 
{13, 15}, {13, 11}, {13, 8}, {14, 15}, {14, 11}, {15, 15}, {15, 11}, {16, 15}, {16, 11}, {16, 7}, {16, 4}},\n            {{0, 0}, {2, 1}, {6, 4}, {8, 6}, {9, 6}, {10, 6}, {11, 6}, {13, 14}, {13, 10}, {14, 14}, {14, 10}, {15, 14}, {15, 10}, {15, 1}, {16, 14}, {16, 10}, {16, 6}},\n            {{0, 0}, {0, 0}, {3, 1}, {7, 5}, {8, 5}, {9, 5}, {10, 5}, {11, 5}, {13, 13}, {13, 9}, {14, 13}, {14, 9}, {15, 13}, {15, 9}, {16, 13}, {16, 9}, {16, 5}},\n            {{0, 0}, {0, 0}, {0, 0}, {5, 3}, {6, 3}, {7, 4}, {8, 4}, {9, 4}, {10, 4}, {11, 4}, {13, 12}, {14, 12}, {14, 8}, {15, 12}, {15, 8}, {16, 12}, {16, 8}},\n        },\n        {\n            {{2, 3}, {6, 11}, {6, 7}, {7, 7}, {8, 7}, {8, 4}, {9, 7}, {11, 15}, {11, 11}, {12, 15}, {12, 11}, {12, 8}, {13, 15}, {13, 11}, {13, 7}, {14, 9}, {14, 7}},\n            {{0, 0}, {2, 2}, {5, 7}, {6, 10}, {6, 6}, {7, 6}, {8, 6}, {9, 6}, {11, 14}, {11, 10}, {12, 14}, {12, 10}, {13, 14}, {13, 10}, {14, 11}, {14, 8}, {14, 6}},\n            {{0, 0}, {0, 0}, {3, 3}, {6, 9}, {6, 5}, {7, 5}, {8, 5}, {9, 5}, {11, 13}, {11, 9}, {12, 13}, {12, 9}, {13, 13}, {13, 9}, {13, 6}, {14, 10}, {14, 5}},\n            {{0, 0}, {0, 0}, {0, 0}, {4, 5}, {4, 4}, {5, 6}, {6, 8}, {6, 4}, {7, 4}, {9, 4}, {11, 12}, {11, 8}, {12, 12}, {13, 12}, {13, 8}, {13, 1}, {14, 4}},\n        },\n        {\n            {{4, 15}, {6, 15}, {6, 11}, {6, 8}, {7, 15}, {7, 11}, {7, 9}, {7, 8}, {8, 15}, {8, 11}, {9, 15}, {9, 11}, {9, 8}, {10, 13}, {10, 9}, {10, 5}, {10, 1}},\n            {{0, 0}, {4, 14}, {5, 15}, {5, 12}, {5, 10}, {5, 8}, {6, 14}, {6, 10}, {7, 14}, {8, 14}, {8, 10}, {9, 14}, {9, 10}, {9, 7}, {10, 12}, {10, 8}, {10, 4}},\n            {{0, 0}, {0, 0}, {4, 13}, {5, 14}, {5, 11}, {5, 9}, {6, 13}, {6, 9}, {7, 13}, {7, 10}, {8, 13}, {8, 9}, {9, 13}, {9, 9}, {10, 11}, {10, 7}, {10, 3}},\n            {{0, 0}, {0, 0}, {0, 0}, {4, 12}, {4, 11}, {4, 10}, {4, 9}, {4, 8}, {5, 13}, {6, 12}, {7, 12}, {8, 12}, {8, 8}, {9, 12}, {10, 10}, {10, 6}, {10, 2}}\n        }\n    };\n\n\n    
AVCEnc_Status status = AVCENC_SUCCESS;\n    uint code, len;\n    int vlcnum;\n\n    if (TrailingOnes > 3)\n    {\n        return AVCENC_TRAILINGONES_FAIL;\n    }\n\n    if (nC >= 8)\n    {\n        if (TotalCoeff)\n        {\n            code = ((TotalCoeff - 1) << 2) | (TrailingOnes);\n        }\n        else\n        {\n            code = 3;\n        }\n        status = BitstreamWriteBits(stream, 6, code);\n    }\n    else\n    {\n        if (nC < 2)\n        {\n            vlcnum = 0;\n        }\n        else if (nC < 4)\n        {\n            vlcnum = 1;\n        }\n        else\n        {\n            vlcnum = 2;\n        }\n\n        len = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][0];\n        code = totCoeffTrailOne[vlcnum][TrailingOnes][TotalCoeff][1];\n        status = BitstreamWriteBits(stream, len, code);\n    }\n\n    return status;\n}\n\nAVCEnc_Status ce_TotalCoeffTrailingOnesChromaDC(AVCEncBitstream *stream, int TrailingOnes, int TotalCoeff)\n{\n    const static uint8 totCoeffTrailOneChrom[4][5][2] =\n    {\n        { {2, 1}, {6, 7}, {6, 4}, {6, 3}, {6, 2}},\n        { {0, 0}, {1, 1}, {6, 6}, {7, 3}, {8, 3}},\n        { {0, 0}, {0, 0}, {3, 1}, {7, 2}, {8, 2}},\n        { {0, 0}, {0, 0}, {0, 0}, {6, 5}, {7, 0}},\n    };\n\n    AVCEnc_Status status = AVCENC_SUCCESS;\n    uint code, len;\n\n    len = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][0];\n    code = totCoeffTrailOneChrom[TrailingOnes][TotalCoeff][1];\n    status = BitstreamWriteBits(stream, len, code);\n\n    return status;\n}\n\n/* see Table 9-7 and 9-8 */\nAVCEnc_Status ce_TotalZeros(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)\n{\n    const static uint8 lenTotalZeros[15][16] =\n    {\n        { 1, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9},\n        { 3, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6},\n        { 4, 3, 3, 3, 4, 4, 3, 3, 4, 5, 5, 6, 5, 6},\n        { 5, 3, 4, 4, 3, 3, 3, 4, 3, 4, 5, 5, 5},\n        { 4, 4, 4, 3, 3, 3, 3, 3, 4, 5, 4, 5},\n        { 6, 5, 
3, 3, 3, 3, 3, 3, 4, 3, 6},\n        { 6, 5, 3, 3, 3, 2, 3, 4, 3, 6},\n        { 6, 4, 5, 3, 2, 2, 3, 3, 6},\n        { 6, 6, 4, 2, 2, 3, 2, 5},\n        { 5, 5, 3, 2, 2, 2, 4},\n        { 4, 4, 3, 3, 1, 3},\n        { 4, 4, 2, 1, 3},\n        { 3, 3, 1, 2},\n        { 2, 2, 1},\n        { 1, 1},\n    };\n\n    const static uint8 codTotalZeros[15][16] =\n    {\n        {1, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 1},\n        {7, 6, 5, 4, 3, 5, 4, 3, 2, 3, 2, 3, 2, 1, 0},\n        {5, 7, 6, 5, 4, 3, 4, 3, 2, 3, 2, 1, 1, 0},\n        {3, 7, 5, 4, 6, 5, 4, 3, 3, 2, 2, 1, 0},\n        {5, 4, 3, 7, 6, 5, 4, 3, 2, 1, 1, 0},\n        {1, 1, 7, 6, 5, 4, 3, 2, 1, 1, 0},\n        {1, 1, 5, 4, 3, 3, 2, 1, 1, 0},\n        {1, 1, 1, 3, 3, 2, 2, 1, 0},\n        {1, 0, 1, 3, 2, 1, 1, 1, },\n        {1, 0, 1, 3, 2, 1, 1, },\n        {0, 1, 1, 2, 1, 3},\n        {0, 1, 1, 1, 1},\n        {0, 1, 1, 1},\n        {0, 1, 1},\n        {0, 1},\n    };\n    int len, code;\n    AVCEnc_Status status;\n\n    len = lenTotalZeros[TotalCoeff-1][total_zeros];\n    code = codTotalZeros[TotalCoeff-1][total_zeros];\n\n    status = BitstreamWriteBits(stream, len, code);\n\n    return status;\n}\n\n/* see Table 9-9 */\nAVCEnc_Status ce_TotalZerosChromaDC(AVCEncBitstream *stream, int total_zeros, int TotalCoeff)\n{\n    const static uint8 lenTotalZerosChromaDC[3][4] =\n    {\n        { 1, 2, 3, 3, },\n        { 1, 2, 2, 0, },\n        { 1, 1, 0, 0, },\n    };\n\n    const static uint8 codTotalZerosChromaDC[3][4] =\n    {\n        { 1, 1, 1, 0, },\n        { 1, 1, 0, 0, },\n        { 1, 0, 0, 0, },\n    };\n\n    int len, code;\n    AVCEnc_Status status;\n\n    len = lenTotalZerosChromaDC[TotalCoeff-1][total_zeros];\n    code = codTotalZerosChromaDC[TotalCoeff-1][total_zeros];\n\n    status = BitstreamWriteBits(stream, len, code);\n\n    return status;\n}\n\n/* see Table 9-10 */\nAVCEnc_Status ce_RunBefore(AVCEncBitstream *stream, int run_before, int zerosLeft)\n{\n    const static uint8 
lenRunBefore[7][16] =\n    {\n        {1, 1},\n        {1, 2, 2},\n        {2, 2, 2, 2},\n        {2, 2, 2, 3, 3},\n        {2, 2, 3, 3, 3, 3},\n        {2, 3, 3, 3, 3, 3, 3},\n        {3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11},\n    };\n\n    const static uint8 codRunBefore[7][16] =\n    {\n        {1, 0},\n        {1, 1, 0},\n        {3, 2, 1, 0},\n        {3, 2, 1, 1, 0},\n        {3, 2, 3, 2, 1, 0},\n        {3, 0, 1, 3, 2, 5, 4},\n        {7, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1},\n    };\n\n    int len, code;\n    AVCEnc_Status status;\n\n    if (zerosLeft <= 6)\n    {\n        len = lenRunBefore[zerosLeft-1][run_before];\n        code = codRunBefore[zerosLeft-1][run_before];\n    }\n    else\n    {\n        len = lenRunBefore[6][run_before];\n        code = codRunBefore[6][run_before];\n    }\n\n    status = BitstreamWriteBits(stream, len, code);\n\n\n    return status;\n}\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_base.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_H_INCLUDED\n#define OSCL_BASE_H_INCLUDED\n\n#include \"oscl_config.h\"\n#include \"oscl_types.h\"\n#include \"oscl_error.h\"\n\nclass OsclBase\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclErrorTrap\n{\n    public:\n\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n        OSCL_IMPORT_REF  static void leave(int) {};\n};\n\nclass OsclMem\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclRequestStatus\n{\n    public:\n        OsclRequestStatus();\n        OsclRequestStatus(int32 aVal)\n        {\n            (void)(aVal);\n        };\n        int32 operator=(int32 aVal);\n        int32 operator==(int32 aVal) const;\n        int32 operator!=(int32 aVal) const;\n        int32 operator>=(int32 aVal) const;\n        int32 operator<=(int32 aVal) const;\n        int32 operator>(int32 aVal) const;\n        int32 operator<(int32 aVal) const;\n        int32 Int() const;\n    private:\n        int32 iStatus;\n};\n\nclass OsclActiveObject\n{\n    public:\n        /**\n    
     * Scheduling priorities.\n         */\n        enum TPriority\n        {\n            /**\n            A low priority, useful for active objects representing\n            background processing.\n            */\n            EPriorityIdle = -100,\n            /**\n            A priority higher than EPriorityIdle but lower than EPriorityStandard.\n            */\n            EPriorityLow = -20,\n            /**\n            Most active objects will have this priority.\n            */\n            EPriorityStandard = 0,\n            /**\n            A priority higher than EPriorityStandard; useful for active objects\n            handling user input.\n            */\n            EPriorityUserInput = 10,\n            /**\n            A priority higher than EPriorityUserInput.\n            */\n            EPriorityHigh = 20\n        };\n\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (inpup param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n        OSCL_IMPORT_REF virtual ~OsclActiveObject();\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Set request active for this AO and set the status to pending.\n         * PendForExec is identical to SetBusy, but it\n         * additionally sets the request status to OSCL_REQUEST_PENDING.\n         *\n         */\n        OSCL_IMPORT_REF void PendForExec();\n\n        /**\n       
  * Complate the active request for the AO.  Can be\n         * called from any thread.\n         * @param aStatus: request completion status.\n         */\n        OSCL_IMPORT_REF void PendComplete(int32 aStatus);\n\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n         * Complete this AO's request immediately.\n         * If the AO is already active, this will do nothing.\n         * Will panic if the AO is not acced to any scheduler,\n         * or if the calling thread context does not match the\n         * scheduling thread.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady();\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  
The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  The default routine will complete\n         * the request.  If any additional action is needed,\n         * the derived class may override this.  If the derived class\n         * does override DoCancel, it must complete the request.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  
The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n\nclass OsclTimerObject\n{\n    public:\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (input param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n\n        //OSCL_IMPORT_REF virtual ~OsclTimerObject();\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n        * 'After' sets the request active, with request status\n        * OSCL_REQUEST_STATUS_PENDING, and starts a timer.\n        * When the timer expires, the request will complete with\n        * status OSCL_REQUEST_ERR_NONE.\n        * Must be called from the same thread in which the\n        * active object is scheduled.\n        * Will panic if the request is already active, the object\n        * is not added to any scheduler, or the calling thread\n        * does not match 
the scheduling thread.\n        * @param anInterval: timeout interval in microseconds.\n        */\n        OSCL_IMPORT_REF void After(int32 aDelayMicrosec);\n\n        /**\n         * Complete the request after a time interval.\n         * RunIfNotReady is identical to After() except that it\n         * first checks the request status, and if it is already\n         * active, it does nothing.\n         *\n         * @param aDelayMicrosec (input param): delay in microseconds.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0);\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  
The default routine will cancel\n         * the timer.  If any additional action is needed,\n         * the derived class may override this.  If the\n         * derived class does override this, it should explicitly\n         * call OsclTimerObject::DoCancel in its own DoCancel\n         * routine.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n#endif // OSCL_BASE_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_base_macros.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_MACROS_H_INCLUDED\n#define OSCL_BASE_MACROS_H_INCLUDED\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#endif // OSCL_BASE_MACROS_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_config.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_CONFIG_H_INCLUDED\n#define OSCL_CONFIG_H_INCLUDED\n\n#define OSCL_HAS_BREW_SUPPORT 0   //Not yet supported\n\n#define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported\n\n#define OSCL_HAS_LINUX_SUPPORT 1\n\n#endif // OSCL_CONFIG_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_dll.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_DLL_H_INCLUDED\n#define OSCL_DLL_H_INCLUDED\n\n#define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {}\n\n\n/**\n * Default DLL entry/exit point function.\n *\n * The body of the DLL entry point is given.  The macro\n * only needs to be declared within the source file.\n *\n * Usage :\n *\n * OSCL_DLL_ENTRY_POINT_DEFAULT()\n */\n\n#define OSCL_DLL_ENTRY_POINT_DEFAULT()\n\n\n\n#endif // OSCL_DLL_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_error.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_ERROR_H_INCLUDED\n#define OSCL_ERROR_H_INCLUDED\n\n\n#define OSCL_LEAVE(x)\n\n\n#endif //OSCL_ERROR_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_error_codes.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_ERROR_CODES_H_INCLUDED\n#define OSCL_ERROR_CODES_H_INCLUDED\n\n\n/** Leave Codes\n*/\ntypedef int32 OsclLeaveCode;\n\n#define OsclErrNone 0\n#define OsclErrGeneral 100\n#define OsclErrNoMemory 101\n#define OsclErrCancelled 102\n#define OsclErrNotSupported 103\n#define OsclErrArgument 104\n#define OsclErrBadHandle 105\n#define OsclErrAlreadyExists 106\n#define OsclErrBusy 107\n#define OsclErrNotReady 108\n#define OsclErrCorrupt 109\n#define OsclErrTimeout 110\n#define OsclErrOverflow 111\n#define OsclErrUnderflow 112\n#define OsclErrInvalidState 113\n#define OsclErrNoResources 114\n\n/** For backward compatibility with old definitions\n*/\n#define OSCL_ERR_NONE OsclErrNone\n#define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory\n\n/** Return Codes\n*/\ntypedef int32 OsclReturnCode;\n\n#define  OsclSuccess 0\n#define  OsclPending 1\n#define  OsclFailure -1\n\n#endif\n\n/*! @} */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_exception.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_EXCEPTION_H_INCLUDED\n#define OSCL_EXCEPTION_H_INCLUDED\n\n\n\n#endif // INCLUDED_OSCL_EXCEPTION_H\n\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_math.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MATH_H_INCLUDED\n#define OSCL_MATH_H_INCLUDED\n\n#include <math.h>\n\n\n\n#define oscl_pow        pow\n#define oscl_exp        exp\n#define oscl_sqrt       sqrt\n#define oscl_log        log\n#define oscl_cos        cos\n#define oscl_sin        sin\n#define oscl_tan        tan\n#define oscl_asin       asin\n\n#endif // OSCL_MATH_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_mem.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MEM_H_INCLUDED\n#define OSCL_MEM_H_INCLUDED\n\n#include \"oscl_types.h\"\n\n#define OSCLMemSizeT size_t\n\n#define oscl_memcpy(dest, src, count)       memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count))\n#define oscl_memset(dest, ch, count)        memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count))\n#define oscl_memmove(dest, src, bytecount)  memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount))\n#define oscl_memcmp(buf1, buf2, count)      memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count))\n#define oscl_malloc(size)                      malloc((OSCLMemSizeT)(size))\n#define oscl_free(memblock)                 free((void *)(memblock))\n#define OSCL_ARRAY_DELETE(ptr)              delete [] ptr\n#define OSCL_ARRAY_NEW(T, count)            new T[count]\n#define OSCL_DELETE(memblock)               delete memblock\n#define OSCL_NEW(arg)                       new arg\n\n#endif // OSCL_MEM_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_string.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n// -*- c++ -*-\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n//               O S C L_ S T R I N G   C L A S S\n\n//    This file contains a standardized set of string containers that\n//    can be used in place of character arrays.\n\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n/*! \\addtogroup osclutil OSCL Util\n *\n * @{\n */\n\n\n/*!\n * \\file oscl_string.h\n * \\brief Provides a standardized set of string containers that\n *    can be used in place of character arrays.\n *\n */\n\n\n#ifndef OSCL_STRING_H_INCLUDED\n#define OSCL_STRING_H_INCLUDED\n\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_MEM_H_INCLUDED\n#include \"oscl_mem.h\"\n#endif\n\n\n\n#endif   // OSCL_STRING_H_INCLUDED\n\n/*! @} */\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/oscl_types.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*! \\file oscl_types.h\n    \\brief This file contains basic type definitions for common use across platforms.\n\n*/\n\n\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#define OSCL_TYPES_H_INCLUDED\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n#include <limits.h>\n#include <string.h>\n\n#define OMX_TRUE 1\n#define OMX_FALSE 0\n\n//! A typedef for a signed 8 bit integer.\n#ifndef int8\ntypedef signed char int8;\n#endif\n\n//! A typedef for an unsigned 8 bit integer.\n#ifndef uint8\ntypedef unsigned char uint8;\n#endif\n\n//! A typedef for a signed 16 bit integer.\n#ifndef int16\ntypedef short int16;\n#endif\n\n//! A typedef for an unsigned 16 bit integer.\n#ifndef uint16\ntypedef unsigned short uint16;\n#endif\n\n//! A typedef for a signed 32 bit integer.\n#ifndef int32\ntypedef long int32;\n#endif\n\n//! 
A typedef for an unsigned 32 bit integer.\n#ifndef uint32\ntypedef unsigned long uint32;\n#endif\n\n#ifndef sint8\ntypedef signed char sint8;\n#endif\n\n#ifndef OsclFloat\ntypedef float OsclFloat;\n#endif\n\n#ifndef uint\ntypedef unsigned int uint;\n#endif\n\n\n#ifndef int64\n#define OSCL_HAS_NATIVE_INT64_TYPE 1\n#define OSCL_NATIVE_INT64_TYPE long long\ntypedef OSCL_NATIVE_INT64_TYPE int64;\n#endif // int64\n\n#ifndef uint64\n#define OSCL_HAS_NATIVE_UINT64_TYPE  1\n#define OSCL_NATIVE_UINT64_TYPE unsigned long long\ntypedef OSCL_NATIVE_UINT64_TYPE uint64;\n#endif // uint64\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#ifndef OSCL_EXPORT_REF\n#define OSCL_EXPORT_REF\n#endif\n\n#ifndef OSCL_IMPORT_REF\n#define OSCL_IMPORT_REF\n#endif\n\n#if defined(OSCL_DISABLE_INLINES)\n#define OSCL_INLINE\n#define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF\n#define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF\n#else\n#define OSCL_INLINE inline\n#define OSCL_COND_IMPORT_REF\n#define OSCL_COND_IMPORT_REF\n#endif\n\n#ifndef INT64\n#define INT64 int64\n#endif\n\n#ifndef UINT64\n#define UINT64 uint64\n#endif\n\n#ifndef UINT64_HILO\n#define UINT64_HILO(a,b) ((a<<32) | b)\n#endif\n\n\n#endif // OSCL_TYPES_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/avc_h264/oscl/osclconfig_compiler_warnings.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n// -*- c++ -*-\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n//       O S C L C O N F I G _ C O M P I L E R  _ W A R N I N G S\n\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n\n/*! \\file osclconfig_compiler_warnings.h\n *  \\brief This file contains the ability to turn off/on compiler warnings\n *\n */\n\n// This macro enables the \"#pragma GCC system_header\" found in any header file that\n// includes this config file.\n// \"#pragma GCC system_header\" suppresses compiler warnings in the rest of that header\n// file by treating the header as a system header file.\n// For instance, foo.h has 30 lines, \"#pragma GCC system_header\" is inserted at line 10,\n// from line 11 to the end of file, all compiler warnings are disabled.\n// However, this does not affect any files that include foo.h.\n//\n#ifdef __GNUC__\n#define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER\n#endif\n\n#define OSCL_FUNCTION_PTR(x) (&x)\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/Android.mk",
    "content": "include $(call all-subdir-makefiles)\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/Android.mk",
    "content": "#\n# Copyright (C) 2008 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This makefile supplies the rules for building a library of JNI code for\n# use by our example platform shared library.\n\nLOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_MODULE_TAGS := optional\n\n# This is the target being built.\nLOCAL_MODULE:= libH263Decoder\n\n# All of the source files that we will compile.\nLOCAL_SRC_FILES:= \\\nsrc/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.cpp \\\nsrc/adaptive_smooth_no_mmx.cpp \\\nsrc/bitstream.cpp \\\nsrc/block_idct.cpp \\\nsrc/cal_dc_scaler.cpp \\\nsrc/chv_filter.cpp \\\nsrc/chvr_filter.cpp \\\nsrc/combined_decode.cpp \\\nsrc/conceal.cpp \\\nsrc/datapart_decode.cpp \\\nsrc/dcac_prediction.cpp \\\nsrc/dec_pred_intra_dc.cpp \\\nsrc/deringing_chroma.cpp \\\nsrc/deringing_luma.cpp \\\nsrc/find_min_max.cpp \\\nsrc/get_pred_adv_b_add.cpp \\\nsrc/get_pred_outside.cpp \\\nsrc/idct.cpp \\\nsrc/idct_vca.cpp  \\\nsrc/mb_motion_comp.cpp \\\nsrc/mb_utils.cpp \\\nsrc/pvdec_api.cpp \\\nsrc/packet_util.cpp \\\nsrc/post_filter.cpp \\\nsrc/post_proc_semaphore.cpp \\\nsrc/pp_semaphore_chroma_inter.cpp \\\nsrc/pp_semaphore_luma.cpp \\\nsrc/scaling_tab.cpp \\\nsrc/vlc_decode.cpp \\\nsrc/vlc_dequant.cpp \\\nsrc/vlc_tab.cpp \\\nsrc/vop.cpp \\\nsrc/zigzag_tab.cpp \\\nsrc/yuv2rgb.cpp \\\nsrc/3GPVideoParser.cpp\n\n# All of the shared libraries we link 
against.\nLOCAL_SHARED_LIBRARIES := \n\n# No static libraries.\nLOCAL_STATIC_LIBRARIES :=\n\n# Also need the JNI headers.\nLOCAL_C_INCLUDES += \\\n\t$(JNI_H_INCLUDE) \\\n\t$(LOCAL_PATH)/src \\\n \t$(LOCAL_PATH)/include \\\n\t$(LOCAL_PATH)/oscl\n\n# No special compiler flags.\nLOCAL_CFLAGS +=\n\n# Don't prelink this library.  For more efficient code, you may want\n# to add this library to the prelink map and set this to true.\nLOCAL_PRELINK_MODULE := false\n\ninclude $(BUILD_SHARED_LIBRARY)\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/mp4dec_api.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _MP4DEC_API_H_\n#define _MP4DEC_API_H_\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#include \"oscl_types.h\"\n#endif\n\n#define PV_TOLERATE_VOL_ERRORS\n#define PV_MEMORY_POOL\n\n#ifndef _PV_TYPES_\n#define _PV_TYPES_\n\ntypedef uint Bool;\n\n#define PV_CODEC_INIT  0\n#define PV_CODEC_STOP  1\n#endif\n\n#define PV_TRUE  1\n#define PV_FALSE 0\n\n/* flag for post-processing  4/25/00 */\n\n#ifdef DEC_NOPOSTPROC\n#undef PV_POSTPROC_ON   /* enable compilation of post-processing code */\n#else\n#define PV_POSTPROC_ON\n#endif\n\n#define PV_NO_POST_PROC 0\n#define PV_DEBLOCK 1\n#define PV_DERING  2\n\n\n\n#include \"visual_header.h\" // struct VolInfo is defined\n\n\n/**@name Structure and Data Types\n * These type definitions specify the input / output from the PVMessage\n * library.\n */\n\n/*@{*/\n/* The application has to allocate space for this structure */\ntypedef struct tagOutputFrame\n{\n    uint8       *data;          /* pointer to output YUV buffer */\n    uint32      timeStamp;      /* time stamp */\n} OutputFrame;\n\ntypedef struct tagApplicationData\n{\n    int layer;          /* current video layer */\n    void *object;       /* 
some optional data field */\n} applicationData;\n\n/* Application controls, this structed shall be allocated */\n/*    and initialized in the application.                 */\ntypedef struct tagvideoDecControls\n{\n    /* The following fucntion pointer is copied to BitstreamDecVideo structure  */\n    /*    upon initialization and never used again. */\n    int (*readBitstreamData)(uint8 *buf, int nbytes_required, void *appData);\n    applicationData appData;\n\n    uint8 *outputFrame;\n    void *videoDecoderData;     /* this is an internal pointer that is only used */\n    /* in the decoder library.   */\n#ifdef PV_MEMORY_POOL\n    int32 size;\n#endif\n    int nLayers;\n    /* pointers to VOL data for frame-based decoding. */\n    uint8 *volbuf[2];           /* maximum of 2 layers for now */\n    int32 volbuf_size[2];\n\n} VideoDecControls;\n\ntypedef enum\n{\n    H263_MODE = 0, MPEG4_MODE,\n    FLV_MODE,\n    UNKNOWN_MODE\n} MP4DecodingMode;\n\ntypedef enum\n{\n    MP4_I_FRAME, MP4_P_FRAME, MP4_B_FRAME, MP4_BAD_FRAME\n} MP4FrameType;\n\ntypedef struct tagVopHeaderInfo\n{\n    int     currLayer;\n    uint32  timestamp;\n    MP4FrameType    frameType;\n    int     refSelCode;\n    int16       quantizer;\n} VopHeaderInfo;\n\n/*--------------------------------------------------------------------------*\n * VideoRefCopyInfo:\n * OMAP DSP specific typedef structure, to support the user (ARM) copying\n * of a Reference Frame into the Video Decoder.\n *--------------------------------------------------------------------------*/\ntypedef struct tagVideoRefCopyInfoPtr\n{\n    uint8   *yChan;             /* The Y component frame the user can copy a new reference to */\n    uint8   *uChan;             /* The U component frame the user can copy a new reference to */\n    uint8   *vChan;             /* The V component frame the user can copy a new reference to */\n    uint8   *currentVop;        /* The Vop for video the user can copy a new reference to */\n} 
VideoRefCopyInfoPtr;\n\ntypedef struct tagVideoRefCopyInfoData\n{\n    int16   width;              /* Width */\n    int16   height;             /* Height */\n    int16   realWidth;          /* Non-padded width, not a multiple of 16. */\n    int16   realHeight;         /* Non-padded height, not a multiple of 16. */\n} VideoRefCopyInfoData;\n\ntypedef struct tagVideoRefCopyInfo\n{\n    VideoRefCopyInfoData data;\n    VideoRefCopyInfoPtr ptrs;\n} VideoRefCopyInfo;\n\n/*@}*/\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n\n    OSCL_IMPORT_REF Bool    PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf[], int32 *volbuf_size, int nLayers, int width, int height, MP4DecodingMode mode);\n    Bool    PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLayers);\n    OSCL_IMPORT_REF Bool    PVCleanUpVideoDecoder(VideoDecControls *decCtrl);\n    Bool    PVResetVideoDecoder(VideoDecControls *decCtrl);\n    OSCL_IMPORT_REF void    PVSetReferenceYUV(VideoDecControls *decCtrl, uint8 *refYUV);\n    Bool    PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp);\n    Bool    PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp);\n    OSCL_IMPORT_REF Bool    PVDecodeVideoFrame(VideoDecControls *decCtrl, uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint use_ext_timestamp[], uint8* currYUV);\n    Bool    PVDecodeVopHeader(VideoDecControls *decCtrl, uint8 *buffer[], uint32 timestamp[], int32 buffer_size[], VopHeaderInfo *header_info, uint use_ext_timestamp[], uint8 *currYUV);\n    Bool    PVDecodeVopBody(VideoDecControls *decCtrl, int32 buffer_size[]);\n    void    PVDecPostProcess(VideoDecControls *decCtrl, uint8 *outputYUV);\n    OSCL_IMPORT_REF void    PVGetVideoDimensions(VideoDecControls *decCtrl, int32 *display_width, int32 *display_height);\n    OSCL_IMPORT_REF void    PVSetPostProcType(VideoDecControls *decCtrl, int mode);\n    uint32  PVGetVideoTimeStamp(VideoDecControls 
*decoderControl);\n    int     PVGetDecBitrate(VideoDecControls *decCtrl);\n    int     PVGetDecFramerate(VideoDecControls *decCtrl);\n    uint8   *PVGetDecOutputFrame(VideoDecControls *decCtrl);\n    int     PVGetLayerID(VideoDecControls *decCtrl);\n    int32   PVGetDecMemoryUsage(VideoDecControls *decCtrl);\n    OSCL_IMPORT_REF MP4DecodingMode PVGetDecBitstreamMode(VideoDecControls *decCtrl);\n    Bool    PVExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size);\n    int32   PVLocateFrameHeader(uint8 *video_buffer, int32 vop_size);\n    int32   PVLocateH263FrameHeader(uint8 *video_buffer, int32 vop_size);\n    Bool    PVGetVolInfo(VideoDecControls *decCtrl, VolInfo *pVolInfo); // BX 6/24/04\n    Bool    IsIntraFrame(VideoDecControls *decoderControl);\n\n#ifdef __cplusplus\n}\n#endif\n#endif /* _MP4DEC_API_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVM4VDECODER_H_INCLUDED\n#define PVM4VDECODER_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef VISUAL_HEADER_H_INCLUDED\n#include \"visual_header.h\"\n#endif\n\n#ifndef PVVIDEODECODERINTERFACE_H_INCLUDED\n#include \"pvvideodecoderinterface.h\"\n#endif\n\n\nclass PVM4VDecoder : public PVVideoDecoderInterface\n{\n    public:\n        virtual ~PVM4VDecoder();\n        static PVM4VDecoder* New(void);\n        /**\n        This is the initialization routine of the MPEG-4 video decoder for decoding an nLayers MPEG-4 bitstream (not\n        used for H.263 and ShortHeader Modes). Video object layer headers for all layers are passed in through the array\n        of buffer, volbuf[].  The size of each header is stored in volbuf_size[]. The iWidth and iHeight fields specify\n        the maximum decoded frame dimensions that should be handled by the decoder for H.263 and ShortHeader Modes (does\n        not have any effect for MPEG-4 Mode).  When the initialization routine is completed, for an MPEG-4 input\n        bitstream the display width and display height will be set to iWidth and iHeight, respectively. 
The mode\n        specifies the elementary bitstream type (0:H.263 and 1:M4V).  This function shall be called before any other\n        API's are used.\n         */\n        virtual bool    InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode);\n\n        /**\n        This function frees all the memory used by the decoder library.\n         */\n        virtual void    CleanUpVideoDecoder(void);\n\n        /**\n        This function takes the compressed bitstreams of a multiple layer video and decodes the next YUV 4:2:0 frame to\n        be displayed.  The application has to allocate memory for the output frame before this function is called, and\n        should be passed into the decoder through yuv parameter. The allocated memory should be WORD aligned. The input\n        bitstream is decoded into this passed YUV buffer.  The unpadded (non-multiple of 16) size of the frame can be\n        obtained by calling GetVideoDimensions() api.  The input parameter, bitstream[], is an array of buffers that\n        stores the next frames to be decoded from all the layers.  The use_ext_timestamp[] parameter tells the decoder\n        to use the externally provided system timestamp (1) (ignoring internal bitstream timestamp) or bitstream (0)\n        timestamp. The buffer_size[]  parameter for video layers is updated with the remaining number of bytes in each\n        layer after consuming a frame worth of data from a particular layer. This is useful if multiple frame data is\n        passed into the video decoder at once. The decoder will decode one frame at a time.  If there is no data at the\n        time of decoding for layer idx, buffer_size[idx] shall be set to 0, otherwise it shall be set to the number of\n        bytes available.  Upon return, this array flags each layer that was used by decoder library.  For example, if\n        the buffer of layer idx is used by the library, buffer_size[idx] will be set to 0.  
The application has to refill\n        the data in this buffer before the decoding of the next frame.  Note that the decoder may use more than one layer\n        of the bitstream at the same time (in the case of spatial/SNR scalability).  The function returns FALSE (0) if an\n        error has occurred during the decoding process.\n\n        The decoding operation requires at least 2 frame buffers. It is up to the user to manage the handling of frame\n        buffers. The frames are always decoded into the YUV-buffer that is passed in using the yuv frame pointer\n        parameter. This YUV frame buffer is kept as reference frame for decoding of the next frame. After decoding of\n        the frame following the current frame this buffer can be recycled or freed.\n\n         */\n        virtual bool    DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_timestamp, uint8 *yuv);\n\n        /**\n        This function sets the reference frame for the decoder. The user should allocate the memory for the reference\n        frame. The size of the reference frame is determined after calling the GetVideoDimensions( ) api. The size\n        should be set to ((display_height + 15)/16)*16  x ((display_width + 15)/16)*16.\n         */\n        virtual void    SetReferenceYUV(uint8 *YUV);\n\n        /**\n        This function returns the display width and height of the video bitstream.\n         */\n        virtual void    GetVideoDimensions(int32 *display_width, int32 *display_height);\n\n        /**\n        This function sets the postprocessing type to be used. 
pp_mode =0 is no postprocessing, pp_mode=1 is deblocking\n        only, pp_mode=3 is deblocking + deringing.\n         */\n        virtual void    SetPostProcType(int32 mode);\n\n        /**\n        This function returns the timestamp of the most recently decoded video frame.\n         */\n        virtual uint32  GetVideoTimestamp(void);\n\n        /**\n        This function is used to get VOL header info.Currently only used to get profile and level info.\n         */\n        virtual bool GetVolInfo(VolInfo* pVolInfo);\n\n        /**\n        This function returns profile and level id.\n         */\n        virtual uint32 GetProfileAndLevel(void);\n\n        /**\n        This function returns average bitrate. (bits per sec)\n         */\n        virtual uint32 GetDecBitrate(void);\n\n        /**\n        This function checks whether the last decoded frame is an INTRA frame or not.\n         */\n        virtual bool    IsIFrame(void);\n\n        /**\n        This function performs postprocessing on the current decoded frame and writes the postprocessed frame to the\n        *yuv frame. If a separate yuv frame is not used for postprocessed frames NULL pointer can be passed in. In this\n        case the postprocessing is done in an internal yuv frame buffer. 
The pointer to his buffer can be obtained by\n        the next GetDecOutputFrame( ) api.\n         */\n        virtual void    DecPostProcess(uint8 *YUV);\n\n        /**\n        This function returns the pointer to the frame to be displayed.\n         */\n        virtual uint8*  GetDecOutputFrame(void);\n\n        /**\n        This function is not used.\n         */\n        virtual bool    ResetVideoDecoder(void);\n\n        /**\n        This function is not used.\n         */\n        virtual void  DecSetReference(uint8 *refYUV, uint32 timestamp);\n\n        /**\n        This function is not used.\n         */\n        virtual void  DecSetEnhReference(uint8 *refYUV, uint32 timestamp);\n\n    private:\n        PVM4VDecoder();\n        bool Construct(void);\n        void *iVideoCtrls;\n};\n\n#endif // PVM4VDECODER_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder_dpi.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef __PVVIDEODECBASE_H\n#define __PVVIDEODECBASE_H\n\n\n// includes\n#include <e32std.h>\n#include <e32base.h>\n#include \"mp4dec_api.h\"\n#include \"dspmsgproto1.h\"\n#include \"dspmsgproto2.h\"\n#include \"dspmsgproto4.h\"\n#include \"dsp_msg_proto6.h\"\n#include \"DspMsgProto7.h\"\n#include \"dspmsgproto9.h\"\n#include \"dspmsgproto10.h\"\n#include \"dspmsgproto11.h\"\n#include \"dspmsgproto12.h\"\n#include \"dspmsgproto13.h\"\n#include \"dspmsgproto14.h\"\n#include \"dspmsgproto15.h\"\n#include \"dspmsgproto16.h\"\n#include \"dspmsgproto17.h\"\n#include \"dspmsgproto20.h\"\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef VISUAL_HEADER_H_INCLUDED\n#include \"visual_header.h\"\n#endif\n\n#ifndef PVVIDEODECODERINTERFACE_H_INCLUDED\n#include \"pvvideodecoderinterface.h\"\n#endif\n\n#include \"pvzcdt.h\"\n#define UChar uint8\n#define MAX_LAYERS 1\n\n#define USING_SYNC_2STEP\n\nclass PVM4VDecoder_DPI : public PVVideoDecoderInterface\n{\n    public:\n\n        PVM4VDecoder_DPI();\n        PVM4VDecoder_DPI(CPVDsp* aDsp);\n        ~PVM4VDecoder_DPI();\n        static PVM4VDecoder_DPI* New(void);\n\n        IMPORT_C static PVM4VDecoder_DPI* NewL(CPVDsp* aDsp);\n\n       
 IMPORT_C bool InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode);\n\n////// not implemeneted/////////////////////////////////////////////////////////////////////////////////////////////////////////////\n        IMPORT_C bool  GetVolInfo(VolInfo* pVolInfo) {};\n        IMPORT_C void   DecPostProcess(uint8 *YUV) {};\n        IMPORT_C void  DecSetEnhReference(uint8 *refYUV, uint32 timestamp) {};\n        IMPORT_C void   SetReferenceYUV(uint8 *YUV) {};\n////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////\n\n        IMPORT_C void   GetVideoDimensions(int32 *display_width, int32 *display_height)\n        {\n            *display_width = GetVideoWidth();\n            *display_height = GetVideoHeight();\n        };\n\n#ifdef USING_SYNC_2STEP\n        IMPORT_C bool   getSynchResponse(uint32 timestamp[], int32 buffer_size[]);\n        IMPORT_C bool   DSPDecoderBusy();\n#endif\n\n        IMPORT_C int32  GetVideoWidth(void);\n        IMPORT_C int32  GetVideoHeight(void);\n        IMPORT_C int32  DPIFreeVideoDecCtrls(void);\n\n        IMPORT_C void   CleanUpVideoDecoder(void);\n        IMPORT_C bool   IsIFrame(void);\n\n        IMPORT_C void   SetPostProcType(int32 aMode);\n\n        IMPORT_C bool  DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *YUV);\n        IMPORT_C bool  DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *YUV, TRequestStatus *aRequestStatus);\n        IMPORT_C bool  DecodeVideoFrameAsyncResp(uint32 timestamp[], int32 buffer_size[]);\n\n        IMPORT_C bool   DecodeStillVideoFrame(uint8 *buffer, int32 buf_size, uint8 *YUV);\n\n        IMPORT_C bool   GetStillVideoFrameSize(uint8 *buffer, int32 buf_size, int32 *width, int32 *height);\n\n        IMPORT_C uint8*  GetDecOutputFrame(void);\n        IMPORT_C void    
GetDecOutputFrame(uint8*);\n\n        IMPORT_C uint8*  CopyDecOutputFrameToSharedMemBuf(void);\n\n        IMPORT_C bool   ResetVideoDecoder(void);\n\n        IMPORT_C TDspPointer DPIAllocVideoDecCtrls(void);\n\n        IMPORT_C uint32 GetVideoTimestamp(void);\n\n        IMPORT_C uint32 GetProfileAndLevel(void);\n\n        IMPORT_C uint32 GetDecBitrate(void);\n\n        // only port the API's used in PVPlayer 2.0\n\n        IMPORT_C bool   ExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size);\n\n        IMPORT_C void DecSetReference(uint8 *refYUV, uint32 timestamp);\n\n#if defined USE_PV_TRANSFER_BUFFER\n        IMPORT_C inline RPVTransferBuffer& GetTxTransferBuffer()\n        {\n            return iTxTransferBuffer;\n        }\n        IMPORT_C inline RPVTransferBuffer& GetRxTransferBuffer()\n        {\n            return iRxTransferBuffer;\n        }\n#else\n        IMPORT_C inline RTransferBuffer& GetTxTransferBuffer()\n        {\n            return iTxTransferBuffer;\n        }\n\n        IMPORT_C inline RTransferBuffer& GetRxTransferBuffer()\n        {\n            return iRxTransferBuffer;\n        }\n#endif\n\n        IMPORT_C inline TInt GetNLayers()\n        {\n            return iNLayers;\n        }\n\n        IMPORT_C inline void SetNLayers(int32 aNLayers)\n        {\n            iNLayers = aNLayers;\n        }\n\n        TDspPointer iVideoCtrls;\n\n    protected:\n\n        void ConstructL(void);\n        bool Construct();\n\n        CPVDsp        *iDsp;\n        TDspPointer   *iBitstreamDspPointer;\n        int32           iNLayers;\n        CDspMsgProto7  dspMsgProto7;\n        CDspMsgProto15 dspMsgProto15;\n        CDspMsgProto16 dspMsgProto16;\n\n    private:\n\n        bool iWaitingBitstream;\n\n        // the order of object declaration is required to ensure the proper sequence of constructor invocation\n\n#if defined USE_PV_TRANSFER_BUFFER\n        RPVTransferBuffer iTxTransferBuffer;\n        RPVTransferWindow 
iTxTransferWindow;\n        RPVTransferBuffer iRxTransferBuffer;\n        RPVTransferWindow iRxTransferWindow;\n#else\n        RTransferBuffer iTxTransferBuffer;\n        RTransferWindow iTxTransferWindow;\n        RTransferBuffer iRxTransferBuffer;\n        RTransferWindow iRxTransferWindow;\n#endif\n\n        RPVTransferBuffer iVideoTransBuf[MAX_LAYERS];\n        RPVTransferWindow iVideoTransWin[MAX_LAYERS];\n        RPVTransferBuffer iVolHeaderTransBuf[MAX_LAYERS];\n        RPVTransferWindow iVolHeaderTransWin[MAX_LAYERS];\n        RPVTransferBuffer iDecodeBusyFlagTransBuf;\n        RPVTransferWindow iDecodeBusyFlagTransWin;\n\n        unsigned short *iDecodeBusyFlagPtr;\n        unsigned char  *iVolHeader[MAX_LAYERS];\n        unsigned char  *iVideoBuffer[MAX_LAYERS];\n\n        uint iHeight;\n        uint iWidth;\n\n        uint iBufferHeight;\n        uint iBufferWidth;\n\n        CDspMsgProto1 dspMsgProto1;\n        CDspMsgProto2 dspMsgProto2;\n        CDspMsgProto4 dspMsgProto4;\n        CDspMsgProto6 dspMsgProto6; //  for SBR\n        CDspMsgProto9 dspMsgProto9;\n        CDspMsgProto10 dspMsgProto10;\n        CDspMsgProto11 dspMsgProto11;\n        CDspMsgProto12 dspMsgProto12;\n        CDspMsgProto13 dspMsgProto13;\n        CDspMsgProto14 dspMsgProto14;\n        CDspMsgProto17 dspMsgProto17;\n        CDspMsgProto20 dspMsgProto20;\n\n        int32 iCurrVideoTimeStamp;\n        uint8* iYuvOutputPtr;\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/pvm4vdecoder_factory.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVM4VDECODER_FACTORY_H_INCLUDED\n#define PVM4VDECODER_FACTORY_H_INCLUDED\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef OSCL_MEM_H_INCLUDED\n#include \"oscl_mem.h\"\n#endif\n\nclass PVVideoDecoderInterface;\n\nclass PVM4VDecoderFactory\n{\n    public:\n        /**\n         * Creates an instance of a PVM4VDecoder. If the creation fails, this function will leave.\n         *\n         * @returns A pointer to an instance of PVM4VDecoder as PVVideoDecoderInterface reference or leaves if instantiation fails\n         **/\n        OSCL_IMPORT_REF static PVVideoDecoderInterface* CreatePVM4VDecoder(void);\n\n        /**\n         * Deletes an instance of PVM4VDecoder and reclaims all allocated resources.\n         *\n         * @param aVideoDec The PVM4VDecoder instance to be deleted\n         * @returns A status code indicating success or failure of deletion\n         **/\n        OSCL_IMPORT_REF static bool DeletePVM4VDecoder(PVVideoDecoderInterface* aVideoDec);\n};\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/pvvideodecoderinterface.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef PVVIDEODECODERINTERFACE_H_INCLUDED\n#define PVVIDEODECODERINTERFACE_H_INCLUDED\n\n// includes\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef VISUAL_HEADER_H_INCLUDED\n#include \"visual_header.h\"\n#endif\n\n#include \"oscl_aostatus.h\"\n\n// PVVideoDecoderInterface pure virtual interface class\nclass PVVideoDecoderInterface\n{\n    public:\n        virtual ~PVVideoDecoderInterface() {};\n        virtual bool    InitVideoDecoder(uint8 *volbuf[], int32 *volbuf_size, int32 nLayers, int32* iWidth, int32* iHeight, int *mode) = 0;\n        virtual void    CleanUpVideoDecoder(void) = 0;\n        virtual bool    DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *yuv) = 0;\n\n        // decode for dual core asynchronous operation\n        virtual bool    DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_ts, uint8 *yuv, OsclAOStatus *asynch)\n        {\n            OSCL_UNUSED_ARG(bitstream);\n            OSCL_UNUSED_ARG(timestamp);\n            OSCL_UNUSED_ARG(buffer_size);\n            OSCL_UNUSED_ARG(use_ext_ts);\n            OSCL_UNUSED_ARG(yuv);\n            
OSCL_UNUSED_ARG(asynch);\n            return true;\n        };\n        virtual bool    DecodeVideoFrameAsyncResp(uint32 timestamp[], int32 buffer_size[])\n        {\n            OSCL_UNUSED_ARG(timestamp);\n            OSCL_UNUSED_ARG(buffer_size);\n            return true;\n        };\n//  virtual uint8*  GetDecOutputFrame(void) {};\n        virtual void    GetDecOutputFrame(uint8*) {};\n        virtual bool    getSynchResponse(uint32 timestamp[], int32 buffer_size[])\n        {\n            OSCL_UNUSED_ARG(timestamp);\n            OSCL_UNUSED_ARG(buffer_size);\n            return true;\n        };\n        virtual bool    DSPDecoderBusy()\n        {\n            return true;\n        };\n\n        virtual void    SetReferenceYUV(uint8 *YUV) = 0;\n        virtual void    GetVideoDimensions(int32 *display_width, int32 *display_height) = 0;\n        virtual void    SetPostProcType(int32 mode) = 0;\n        virtual uint32  GetVideoTimestamp(void) = 0;\n        virtual bool    GetVolInfo(VolInfo* pVolInfo) = 0;\n        virtual bool    IsIFrame(void) = 0;\n        virtual void    DecPostProcess(uint8 *YUV) = 0;\n        virtual uint8*  GetDecOutputFrame(void) = 0;\n        virtual bool    ResetVideoDecoder(void) = 0;\n        virtual void    DecSetReference(uint8 *refYUV, uint32 timestamp) = 0;\n        virtual void    DecSetEnhReference(uint8 *refYUV, uint32 timestamp) = 0;\n        virtual uint32 GetProfileAndLevel(void) = 0;\n        virtual uint32 GetDecBitrate(void) = 0; // This function returns the average bits per second.\n};\n\n#endif // PVVIDEODECODERINTERFACE_H_INCLUDED\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/include/visual_header.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _VISUAL_HEADER_H\n#define _VISUAL_HEADER_H\n\n#ifndef _PV_TYPES_ // In order to compile in MDF wrapper\n#define _PV_TYPES_\n\ntypedef uint Bool;\n\n#endif // #ifndef _PV_TYPES_\n\n\ntypedef struct tagVolInfo\n{\n    int32   shortVideoHeader;       /* shortVideoHeader mode */\n\n    /* Error Resilience Flags */\n    int32   errorResDisable;        /* VOL disable error resilence mode(Use Resynch markers) */\n    int32   useReverseVLC;          /* VOL reversible VLCs */\n    int32   dataPartitioning;       /* VOL data partitioning */\n\n    /* Parameters used for scalability */\n    int32   scalability;            /* VOL scalability (flag) */\n\n    int32   nbitsTimeIncRes;        /* number of bits for time increment () */\n\n    int32   profile_level_id;       /* profile and level */\n\n\n} VolInfo;\n\n#endif // #ifndef _VISUAL_HEADER_H\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_base.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_H_INCLUDED\n#define OSCL_BASE_H_INCLUDED\n\n#include \"oscl_config.h\"\n#include \"oscl_types.h\"\n#include \"oscl_error.h\"\n\nclass OsclBase\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclErrorTrap\n{\n    public:\n\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n        OSCL_IMPORT_REF  static void leave(int) {};\n};\n\nclass OsclMem\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclRequestStatus\n{\n    public:\n        OsclRequestStatus();\n        OsclRequestStatus(int32 aVal)\n        {\n            (void)(aVal);\n        };\n        int32 operator=(int32 aVal);\n        int32 operator==(int32 aVal) const;\n        int32 operator!=(int32 aVal) const;\n        int32 operator>=(int32 aVal) const;\n        int32 operator<=(int32 aVal) const;\n        int32 operator>(int32 aVal) const;\n        int32 operator<(int32 aVal) const;\n        int32 Int() const;\n    private:\n        int32 iStatus;\n};\n\nclass OsclActiveObject\n{\n    public:\n        /**\n    
     * Scheduling priorities.\n         */\n        enum TPriority\n        {\n            /**\n            A low priority, useful for active objects representing\n            background processing.\n            */\n            EPriorityIdle = -100,\n            /**\n            A priority higher than EPriorityIdle but lower than EPriorityStandard.\n            */\n            EPriorityLow = -20,\n            /**\n            Most active objects will have this priority.\n            */\n            EPriorityStandard = 0,\n            /**\n            A priority higher than EPriorityStandard; useful for active objects\n            handling user input.\n            */\n            EPriorityUserInput = 10,\n            /**\n            A priority higher than EPriorityUserInput.\n            */\n            EPriorityHigh = 20\n        };\n\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (inpup param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n        OSCL_IMPORT_REF virtual ~OsclActiveObject();\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Set request active for this AO and set the status to pending.\n         * PendForExec is identical to SetBusy, but it\n         * additionally sets the request status to OSCL_REQUEST_PENDING.\n         *\n         */\n        OSCL_IMPORT_REF void PendForExec();\n\n        /**\n       
  * Complate the active request for the AO.  Can be\n         * called from any thread.\n         * @param aStatus: request completion status.\n         */\n        OSCL_IMPORT_REF void PendComplete(int32 aStatus);\n\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n         * Complete this AO's request immediately.\n         * If the AO is already active, this will do nothing.\n         * Will panic if the AO is not acced to any scheduler,\n         * or if the calling thread context does not match the\n         * scheduling thread.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady();\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  
The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  The default routine will complete\n         * the request.  If any additional action is needed,\n         * the derived class may override this.  If the derived class\n         * does override DoCancel, it must complete the request.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  
The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n\nclass OsclTimerObject\n{\n    public:\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (input param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n\n        //OSCL_IMPORT_REF virtual ~OsclTimerObject();\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n        * 'After' sets the request active, with request status\n        * OSCL_REQUEST_STATUS_PENDING, and starts a timer.\n        * When the timer expires, the request will complete with\n        * status OSCL_REQUEST_ERR_NONE.\n        * Must be called from the same thread in which the\n        * active object is scheduled.\n        * Will panic if the request is already active, the object\n        * is not added to any scheduler, or the calling thread\n        * does not match 
the scheduling thread.\n        * @param anInterval: timeout interval in microseconds.\n        */\n        OSCL_IMPORT_REF void After(int32 aDelayMicrosec);\n\n        /**\n         * Complete the request after a time interval.\n         * RunIfNotReady is identical to After() except that it\n         * first checks the request status, and if it is already\n         * active, it does nothing.\n         *\n         * @param aDelayMicrosec (input param): delay in microseconds.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0);\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  
The default routine will cancel\n         * the timer.  If any additional action is needed,\n         * the derived class may override this.  If the\n         * derived class does override this, it should explicitly\n         * call OsclTimerObject::DoCancel in its own DoCancel\n         * routine.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n#endif // OSCL_BASE_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_base_macros.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_MACROS_H_INCLUDED\n#define OSCL_BASE_MACROS_H_INCLUDED\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#endif // OSCL_BASE_MACROS_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_config.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_CONFIG_H_INCLUDED\n#define OSCL_CONFIG_H_INCLUDED\n\n#define OSCL_HAS_BREW_SUPPORT 0   //Not yet supported\n\n#define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported\n\n#define OSCL_HAS_LINUX_SUPPORT 1\n\n#endif // OSCL_CONFIG_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_dll.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_DLL_H_INCLUDED\n#define OSCL_DLL_H_INCLUDED\n\n#define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {}\n\n\n/**\n * Default DLL entry/exit point function.\n *\n * The body of the DLL entry point is given.  The macro\n * only needs to be declared within the source file.\n *\n * Usage :\n *\n * OSCL_DLL_ENTRY_POINT_DEFAULT()\n */\n\n#define OSCL_DLL_ENTRY_POINT_DEFAULT()\n\n\n\n#endif // OSCL_DLL_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_error.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_ERROR_H_INCLUDED\n#define OSCL_ERROR_H_INCLUDED\n\n\n#define OSCL_LEAVE(x)\n\n\n#endif //OSCL_ERROR_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_error_codes.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_ERROR_CODES_H_INCLUDED\n#define OSCL_ERROR_CODES_H_INCLUDED\n\n\n/** Leave Codes\n*/\ntypedef int32 OsclLeaveCode;\n\n#define OsclErrNone 0\n#define OsclErrGeneral 100\n#define OsclErrNoMemory 101\n#define OsclErrCancelled 102\n#define OsclErrNotSupported 103\n#define OsclErrArgument 104\n#define OsclErrBadHandle 105\n#define OsclErrAlreadyExists 106\n#define OsclErrBusy 107\n#define OsclErrNotReady 108\n#define OsclErrCorrupt 109\n#define OsclErrTimeout 110\n#define OsclErrOverflow 111\n#define OsclErrUnderflow 112\n#define OsclErrInvalidState 113\n#define OsclErrNoResources 114\n\n/** For backward compatibility with old definitions\n*/\n#define OSCL_ERR_NONE OsclErrNone\n#define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory\n\n/** Return Codes\n*/\ntypedef int32 OsclReturnCode;\n\n#define  OsclSuccess 0\n#define  OsclPending 1\n#define  OsclFailure -1\n\n#endif\n\n/*! @} */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_exception.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_EXCEPTION_H_INCLUDED\n#define OSCL_EXCEPTION_H_INCLUDED\n\n\n\n#endif // INCLUDED_OSCL_EXCEPTION_H\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_math.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MATH_H_INCLUDED\n#define OSCL_MATH_H_INCLUDED\n\n#include <math.h>\n\n\n\n#define oscl_pow        pow\n#define oscl_exp        exp\n#define oscl_sqrt       sqrt\n#define oscl_log        log\n#define oscl_cos        cos\n#define oscl_sin        sin\n#define oscl_tan        tan\n#define oscl_asin       asin\n\n#endif // OSCL_MATH_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_mem.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MEM_H_INCLUDED\n#define OSCL_MEM_H_INCLUDED\n\n#include \"oscl_types.h\"\n\n#define OSCLMemSizeT size_t\n\n#define oscl_memcpy(dest, src, count)       memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count))\n#define oscl_memset(dest, ch, count)        memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count))\n#define oscl_memmove(dest, src, bytecount)  memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount))\n#define oscl_memcmp(buf1, buf2, count)      memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count))\n#define oscl_malloc(size)                      malloc((OSCLMemSizeT)(size))\n#define oscl_free(memblock)                 free((void *)(memblock))\n#define OSCL_ARRAY_DELETE(ptr)              delete [] ptr\n#define OSCL_ARRAY_NEW(T, count)            new T[count]\n#define OSCL_DELETE(memblock)               delete memblock\n#define OSCL_NEW(arg)                       new arg\n\n#endif // OSCL_MEM_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/oscl_types.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*! \\file oscl_types.h\n    \\brief This file contains basic type definitions for common use across platforms.\n\n*/\n\n\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#define OSCL_TYPES_H_INCLUDED\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n#include <limits.h>\n#include <string.h>\n\n//! A typedef for a signed 8 bit integer.\n#ifndef int8\ntypedef signed char int8;\n#endif\n\n//! A typedef for an unsigned 8 bit integer.\n#ifndef uint8\ntypedef unsigned char uint8;\n#endif\n\n//! A typedef for a signed 16 bit integer.\n#ifndef int16\ntypedef short int16;\n#endif\n\n//! A typedef for an unsigned 16 bit integer.\n#ifndef uint16\ntypedef unsigned short uint16;\n#endif\n\n//! A typedef for a signed 32 bit integer.\n#ifndef int32\ntypedef long int32;\n#endif\n\n//! 
A typedef for an unsigned 32 bit integer.\n#ifndef uint32\ntypedef unsigned long uint32;\n#endif\n\n#ifndef sint8\ntypedef signed char sint8;\n#endif\n\n#ifndef OsclFloat\ntypedef float OsclFloat;\n#endif\n\n#ifndef uint\ntypedef unsigned int uint;\n#endif\n\n\n#ifndef int64\n#define OSCL_HAS_NATIVE_INT64_TYPE 1\n#define OSCL_NATIVE_INT64_TYPE long long\ntypedef OSCL_NATIVE_INT64_TYPE int64;\n#endif // int64\n\n#ifndef uint64\n#define OSCL_HAS_NATIVE_UINT64_TYPE  1\n#define OSCL_NATIVE_UINT64_TYPE unsigned long long\ntypedef OSCL_NATIVE_UINT64_TYPE uint64;\n#endif // uint64\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#ifndef OSCL_EXPORT_REF\n#define OSCL_EXPORT_REF\n#endif\n\n#ifndef OSCL_IMPORT_REF\n#define OSCL_IMPORT_REF\n#endif\n\n#if defined(OSCL_DISABLE_INLINES)\n#define OSCL_INLINE\n#define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF\n#define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF\n#else\n#define OSCL_INLINE inline\n#define OSCL_COND_IMPORT_REF\n#define OSCL_COND_IMPORT_REF\n#endif\n\n#ifndef INT64\n#define INT64 int64\n#endif\n\n#ifndef UINT64\n#define UINT64 uint64\n#endif\n\n#ifndef UINT64_HILO\n#define UINT64_HILO(a,b) ((a<<32) | b)\n#endif\n\n\n#endif // OSCL_TYPES_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/oscl/osclconfig_compiler_warnings.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n// -*- c++ -*-\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n//       O S C L C O N F I G _ C O M P I L E R  _ W A R N I N G S\n\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n\n/*! \\file osclconfig_compiler_warnings.h\n *  \\brief This file contains the ability to turn off/on compiler warnings\n *\n */\n\n// This macro enables the \"#pragma GCC system_header\" found in any header file that\n// includes this config file.\n// \"#pragma GCC system_header\" suppresses compiler warnings in the rest of that header\n// file by treating the header as a system header file.\n// For instance, foo.h has 30 lines, \"#pragma GCC system_header\" is inserted at line 10,\n// from line 11 to the end of file, all compiler warnings are disabled.\n// However, this does not affect any files that include foo.h.\n//\n#ifdef __GNUC__\n#define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER\n#endif\n\n#define OSCL_FUNCTION_PTR(x) (&x)\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/3GPVideoParser.cpp",
    "content": "/*\n * Copyright (C) 2009 OrangeLabs\n * 3GPVideoParser.cpp\n *\n *  Created on: 12 août 2009\n *      Author: rglt1266\n */\n#define LOG_TAG \"3GPPSampleReader\"\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n#include <limits.h>\n#include \"3GPVideoParser.h\"\n\n/* Variables */\nFILE* f = NULL; // File to parse\n\nuint32 TimeScale = 0; // Ticks per second\nuint32 VideoLength = 0; // Video length (time)\nuint32 VideoWidth = 0;\nuint32 VideoHeight = 0;\nchar VideoCodec[5]; // Codec type: d263/mp4v....\n\nuint32 moovAtomPtr = 0;\nuint32 moovAtomSize = 0;\nuint32 trakAtomPtr = 0;\nuint32 trakAtomSize = 0;\n\n/* Buffers and pointers*/\nuint8* moovBuff = 0;\nuint8* sttsPtr = 0;\nuint8* stcoPtr = 0;\nuint8* stszPtr = 0;\nuint8* stscPtr = 0;\nuint8* stsdPtr = 0;\nSample* samplePtr = 0;\n\n/**\n * Endien convert\n */\nuint32 EndienConvert (uint32 input){\n\treturn ((input & 0xFF) << 24) | ((input & 0xFF00) << 8) |\t((uint32)(input & 0xFF0000) >> 8) | ((uint32)(input & 0xFF000000) >> 24);\n}\n\n/**\n * Get a uint32 value at a precised position in a uint8 buffer\n */\nuint32 getUint32FromUint8Buffer (uint8* buffer,uint32 offset){\n\treturn ( ((buffer[offset]<<24)& 0xff000000) | ((buffer[offset+1]<<16)& 0xff0000) | ((buffer[offset+2]<<8)& 0xff00) | ((buffer[offset+3])& 0xff));\n}\n\n/**\n * Find a particular value in a uint8 buffer reading uint32\n */\nint32 findAtom (uint8* buffer,uint32 bufferSize, uint32 valueToFind){\n\tuint32 tmp;\n\tuint32 i = 0;\n\tfor (i=0;i<(bufferSize-4);i++){\n\t\ttmp = getUint32FromUint8Buffer(buffer,i);\n\t\tif (tmp == valueToFind){\n\t\t\treturn i-4;\n\t\t}\n\t}\n\treturn VPAtomError;\n}\n\n/**\n * Find a particular value in a uint32 buffer\n */\nint32 findAtom (uint32* buffer,uint32 bufferSize, uint32 valueToFind){\n\tuint32 i = 0;\n\tfor (i=0;i<(bufferSize);i++){\n\t\tif (EndienConvert(buffer[i]) == valueToFind){\n\t\t\treturn i;\n\t\t}\n\t}\n\treturn VPAtomError;\n}\n\n/**\n* Cleanup the 
parser\n*\n* @return error code\n*/\nint cleanupParser(void){\n\t/* Clean atom info */\n\tfree(moovBuff);\n\tVideoWidth = 0;\n\tVideoHeight = 0;\n\tVideoCodec[0] = '\\0';\n\tVideoLength = 0;\n\treturn VPAtomSucces;\n}\n\n/**\n* Init the parser\n*\n* @param filePath path of the file to read\n* @param width check if the video width is correct\n* @param heigth check if the video height is correct\n* @return error code\n*/\nint Init3GPVideoParser (char *filePath){\n\tuint32 anAtomSize = 0;\n\tuint32 anAtomType = 0;\n\tuint32 trakOffset = 0;\n\n\tint32 pos = 0;\n\tint32 fileSize;\n\n\t/* Load file */\n\tf = fopen(filePath,\"r\");\n\tif (f == NULL) {\n\t  return VPAtomError;\n\t}\n\tfseek( f, 0L, SEEK_END );\n\tfileSize = ftell( f );\n\tif (fileSize <= 8 ) return VPAtomError; // File is too small !\n\n\t/* Check if file format is correct ie it's a 3gp file*/\n\tfseek(f,4,SEEK_SET);\n\tfread(&anAtomType,sizeof(uint32),1,f);\n\tanAtomType = EndienConvert(anAtomType);\n\tif (anAtomType != AtomFtyp) return VPAtomError;\n\n\t/* Start parsing from begining*/\n\trewind (f);\n\n\t// Find Moov Atom\n\twhile (ftell(f)<fileSize){\n\t\tfread(&anAtomSize,sizeof(uint32),1,f);\n\t\tanAtomSize = EndienConvert(anAtomSize);\n\t\tfread(&anAtomType,sizeof(uint32),1,f);\n\t\tanAtomType = EndienConvert(anAtomType);\n\t\tif (anAtomType == AtomMoov){\n\t\t\tmoovAtomPtr=ftell(f)-8;\n\t\t\tmoovAtomSize=anAtomSize;\n\t\t}\n\t\t// Switch to next Atom\n\t\tfseek(f,anAtomSize-8,SEEK_CUR);/* -8 is because we already read 2*4 Bytes of this Atom*/\n\t}\n\n\t/* Copy moov to buffer */\n\tmoovBuff = (uint8*)malloc(moovAtomSize);\n\tfseek(f,moovAtomPtr,SEEK_SET);\n\tfor (uint32 j=0;j<(moovAtomSize);j++){\n\t\tfread(&moovBuff[j],1,1,f);\n\t}\n\n\t// Find trak(s) Atom\n\tpos = findAtom(moovBuff,moovAtomSize,AtomTrak);\n\twhile (pos > 0) {\n\t\tint32 trakSize = getUint32FromUint8Buffer(moovBuff,pos);\n\t\tif (findAtom(moovBuff+pos,trakSize,AtomVmhd)){\n\t\t\ttrakAtomPtr = moovAtomPtr+pos;\n\t\t\ttrakAtomSize = 
trakSize;\n\t\t\tbreak;\n\t\t} else {\n\t\t\t// This is not the videotrack\n\t\t}\n\t\t// Trying to find new trak\n\t\tpos = findAtom(moovBuff+pos,moovAtomSize-pos,AtomTrak);\n\t}\n\tif (trakAtomPtr == 0) {\n\t    return VPAtomError;\n\t}\n\n\n\ttrakOffset = trakAtomPtr - moovAtomPtr;\n\n\t// Find MDHD\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomMdhd);\n\tif (pos > 0){\n\t\tuint8* Ptr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t\tTimeScale = getUint32FromUint8Buffer(Ptr,4);\n\t\tVideoLength = getUint32FromUint8Buffer(Ptr,8);\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\n\t// Find STTS\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStts);\n\tif (pos > 0){\n\t\tsttsPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\n\t// Find STSZ\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsz);\n\tif (pos > 0){\n\t\tstszPtr = moovBuff + trakOffset + pos + 20; // Skip Atom size and Atom name\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STCO\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStco);\n\tif (pos > 0){\n\t\tstcoPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STSC\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsc);\n\tif (pos > 0){\n\t\tstscPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size, Atom name, ...\n\t} else {\n\t\treturn VPAtomError;\n\t}\n\t// Find STSD\n\tpos = findAtom(moovBuff+trakOffset,trakAtomSize,AtomStsd);\n\tif (pos > 0){\n\t\tstsdPtr = moovBuff + trakOffset + pos + 16; // Skip Atom size and Atom name\n\t\tVideoWidth = (getUint32FromUint8Buffer(stsdPtr,32)>>16) & 0xFFFF;\n\t\tVideoHeight = getUint32FromUint8Buffer(stsdPtr,32) & 0xFFFF;\n\t\tVideoCodec[0] = *(stsdPtr+90);\n\t\tVideoCodec[1] = *(stsdPtr+91);\n\t\tVideoCodec[2] = *(stsdPtr+92);\n\t\tVideoCodec[3] = *(stsdPtr+93);\n\t\tVideoCodec[4]= '\\0';\n\t} else {\n\t      
return VPAtomError;\n\t}\n\n\n\t/**\n\t * Prepare Sample list\n\t */\n\tuint32 countChunk = 0; // Total number of chunk\n\tuint32 currChunk=0; // Counter for current chunk\n\tuint32 currChunkInStsc=0; // Current chunk described in stsc Atom\n\tuint32 ChunkAddr = 0; // Current chunk offset\n\tuint32 countSample = 0; // Counter for sample in a chunk\n\tuint32 currSample = 0; // Counter for current sample (/total sample in file)\n\tuint32 SamplePerChunk = 0; // Value sample per chunk\n\tuint32 currStscPos = 0; // Current stsc table\n\tuint32 Offset = 0; // Offset from ChunkAddr to sample data start\n\tint32 currSttsPos = 0;\n\tuint32 SameTimestampCount = 0; // For case where n sample have the same timestamp\n\tuint32 temp;\n\tSample* currSamplePtr = 0; // Pointer to current Sample\n\tSample* aSample = 0; // Current Sample element\n\tbool initList = false; // Boolean changed after first sample is read\n\n\t/* Get \"Number of entries\" field of stco atom */\n\tcountChunk = getUint32FromUint8Buffer(stcoPtr-4,0);\n\t/* Init currChunk */\n\tcurrChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12);\n\n\tfor (currChunk=0;currChunk<countChunk;currChunk++){\n\t\tChunkAddr = getUint32FromUint8Buffer(stcoPtr,currChunk*4);\n\t\tif (currChunkInStsc == currChunk+1){\n\t\t\tSamplePerChunk = getUint32FromUint8Buffer(stscPtr,currStscPos*12+4);\n\t\t\tcurrStscPos++;\n\t\t\tcurrChunkInStsc = getUint32FromUint8Buffer(stscPtr,currStscPos*12);\n\t\t} else {\n\t\t\t// Repeat old value\n\t\t}\n\t\tOffset = 0;\n\t\tfor (countSample=0;countSample<SamplePerChunk;countSample++){\n\t\t\t/* Malloc a new sample */\n\t\t\taSample = (Sample*)malloc(sizeof(Sample));\n\t\t\t/* Get sample size */\n\t\t\taSample->size = getUint32FromUint8Buffer(stszPtr,currSample*4);\n\t\t\tcurrSample++;\n\t\t\t/* Get sample addr */\n\t\t\taSample->addr = ChunkAddr + Offset;\n\t\t\tOffset = Offset + aSample->size;\n\t\t\t/* Get sample timestamp */\n\t\t\tif (SameTimestampCount == 0){\n\t\t\t\t// Read new stts 
element\n\t\t\t\tSameTimestampCount = getUint32FromUint8Buffer(sttsPtr,currSttsPos*8);\n\t\t\t\tcurrSttsPos++;\n\t\t\t}\n\t\t\ttemp = getUint32FromUint8Buffer(sttsPtr,(currSttsPos-1)*8+4);\n\t\t\taSample->timestamp = (uint32)((temp*1000)/TimeScale);\n\t\t\tSameTimestampCount--;\n\t\t\t/* Set next to NULL */\n\t\t\taSample->next = NULL;\n\t\t\t/* Update the sample list */\n\t\t\tif (initList == false){\n\t\t\t\tsamplePtr = aSample;\n\t\t\t\tcurrSamplePtr = aSample;\n\t\t\t\tinitList = true;\n\t\t\t} else {\n\t\t\t\tcurrSamplePtr->next = aSample;\n\t\t\t\tcurrSamplePtr = aSample;\n\t\t\t\tcurrSamplePtr->next = NULL;\n\t\t\t}\n\t\t}\n\t}\n\treturn VPAtomSucces;\n}\n\n/**\n* Get Videoframe\n*\n* @param aOutBuffer buffer to write the videoframe\n* @param aBufferSize size of the buffer\n* @param aTimestamp timestamp\n* @return error code for overrun buffer\n*/\nint getFrame (uint8* aOutBuffer,uint32* aBufferSize, uint32* aTimestamp){\n\t// Temp sample to free data\n\tSample* tmp;\n\tif (samplePtr != NULL){\n\t\tif (aOutBuffer == NULL || f==NULL){\n\t\t    return VPAtomError;\n\t\t}\n\t\tfseek(f,samplePtr->addr,SEEK_SET);\n\t\tif (fread(aOutBuffer,1,samplePtr->size,f) != samplePtr->size){\n\t\t\treturn VPAtomError;\n\t\t}\n\t\t*aTimestamp = samplePtr->timestamp;\n\t\t*aBufferSize = samplePtr->size;\n\t\t/* Free the sample */\n\t\ttmp = samplePtr;\n\t\tsamplePtr = samplePtr->next;\n\t\tfree(tmp);\n\t\treturn VPAtomSucces;\n\t} else {\n\t\taOutBuffer = NULL;\n\t\t*aBufferSize = 0;\n\t\t*aTimestamp = 0;\n\t\treturn VPAtomError;\n\t}\n}\n\n/**\n * Release file by closing it\n *\n * @return error code\n */\nint release(){\n\tif (f != NULL){\n\t\tfclose(f);\n\t}\n\treturn cleanupParser();\n}\n\n/**\n * Get the video duration\n *\n * @return video duration in seconds ( last 3 digits are ms)\n */\nuint32 getVideoDuration (){\n\tuint32 retValue = 0;\n\tretValue = ((VideoLength/TimeScale)*1000)+(VideoLength%TimeScale);\n\treturn retValue;\n}\n\n/**\n * Get the video codec\n *\n * 
@return video codec string\n */\nchar* getVideoCodec (){\n\treturn VideoCodec;\n}\n\n/**\n * Get video width\n *\n * @return video width\n */\nuint32 getVideoWidth (){\n\treturn VideoWidth;\n}\n\n/**\n * Get the video height\n *\n * @return video height\n */\nuint32 getVideoHeight(){\n\treturn VideoHeight;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/3GPVideoParser.h",
    "content": "/*\n * Copyright (C) 2009 OrangeLabs\n * 3GPVideoParser.h\n *\n *  Created on: 12 août 2009\n *      Author: rglt1266\n */\n\n#ifndef _3GPVIDEOPARSER_H_\n#define _3GPVIDEOPARSER_H_\n\n/* Define new types */\ntypedef unsigned char uint8;\ntypedef unsigned short uint16;\ntypedef short int16;\ntypedef unsigned long uint32;\ntypedef long int32;\n\n#define DEBUG 1;\n\n/* Define important atoms 4Bytes code (char)*/\n#define\tAtomFtyp 0x66747970 /* File type compatibility atom */\n#define\tAtomMdat 0x6D646174 /* Movie sample data atom */\n#define\tAtomMoov 0x6D6F6F76 /* Movie ressource metadata atom */\n#define\tAtomMdhd 0x6D646864 /* Video media information header atom */\n#define\tAtomMvhd 0x6D766864 /* Video media information header atom */\n#define\tAtomStts 0x73747473 /* Time-to-sample atom */\n#define\tAtomStco 0x7374636F /* Sample-to-chunck atom */\n#define\tAtomTrak 0x7472616B /* Trak atom */\n#define\tAtomStsz 0x7374737A /* Sample size atom */\n#define AtomStsc 0x73747363 /* Nb of sample per chunck */\n#define AtomStsd 0x73747364 /* Nb of sample per chunck */\n#define AtomVmhd 0x766D6864 /* Identifier of a video track */\n\n/* Define error codes */\n#define VPAtomError 0\n#define VPAtomSucces 1\n\ntypedef struct {\n\tuint32 ptr;\n\tuint32 size;\n} Atom;\n\nstruct sample {\n\tuint32 addr;\n\tuint32 size;\n\tuint32 timestamp;\n\tstruct sample *next;\n};\ntypedef struct sample Sample;\n\nint Init3GPVideoParser (char *);\nint release();\nint getFrame (uint8*,uint32*, uint32*);\nuint32 getVideoDuration();\nuint32 getVideoWidth();\nuint32 getVideoHeight();\nchar* getVideoCodec();\n\n#endif /* 3GPVIDEOPARSER_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/adaptive_smooth_no_mmx.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n\n Description: Separated modules into one function per file and put into\n    new template.\n\n Description: Optimizing C code and adding comments.  Also changing variable\n    names to make them more meaningful.\n\n Who:                   Date:\n Description:\n\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n\n    Rec_Y = pointer to 0th position in buffer containing luminance values\n        of type uint8.\n    y_start = value of y coordinate of type int that specifies the first\n        row of pixels to be used in the filter algorithm.\n    x_start = value of x coordinate of type int that specifies the first\n        column of pixels to be used in the filter algorithm.\n    y_blk_start = value of the y coordinate of type int that specifies the\n        row of pixels which contains the start of a block. The row\n        specified by y_blk_start+BLK_SIZE is the last row of pixels\n        that are used in the filter algorithm.\n    x_blk_start = value of the x coordinate of type int that specifies the\n        column of pixels which contains the start of a block.  
The\n        column specified by x_blk_start+BLK_SIZE is the last column of\n        pixels that are used in the filter algorithm.\n    thr = value of type int that is compared to the elements in Rec_Y to\n        determine if a particular value in Rec_Y will be modified by\n        the filter or not\n    width = value of type int that specifies the width of the display\n        in pixels (or pels, equivalently).\n    max_diff = value of type int that specifies the value that may be added\n        or subtracted from the pixel in Rec_Y that is being filtered\n        if the filter algorithm decides to change that particular\n        pixel's luminance value.\n\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    None\n\n Pointers and Buffers Modified:\n    Buffer pointed to by Rec_Y is modified with the filtered\n    luminance values.\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This function implements a motion compensated noise filter using adaptive\n weighted averaging of luminance values.  *Rec_Y contains the luminance values\n that are being filtered.\n\n The picture below depicts a 3x3 group of pixel luminance values.  The \"u\", \"c\",\n and \"l\" stand for \"upper\", \"center\" and \"lower\", respectively.  The location\n of pelc0 is specified by x_start and y_start in the 1-D array \"Rec_Y\" as\n follows (assuming x_start=0):\n\n location of pelc0 = [(y_start+1) * width] + x_start\n\n Moving up or down 1 row (moving from pelu2 to pelc2, for example) is done by\n incrementing or decrementing \"width\" elements within Rec_Y.\n\n The coordinates of the upper left hand corner of a block (not the group of\n 9 pixels depicted in the figure below) is specified by\n (y_blk_start, x_blk_start).  
The width and height of the block is BLKSIZE.\n (y_start,x_start) may be specified independently of (y_blk_start, x_blk_start).\n\n    (y_start,x_start)\n -----------|--------------------------\n    |   |   |   |   |\n    |   X   | pelu1 | pelu2 |\n    | pelu0 |   |   |\n    |   |   |   |\n --------------------------------------\n    |   |   |   |\n    | pelc0 | pelc1 | pelc2 |\n    |   |   |   |\n    |   |   |   |\n --------------------------------------\n    |   |   |   |\n    | pell0 | pell1 | pell2 |\n    |   |   |   |\n    |   |   |   |\n --------------------------------------\n\n The filtering of the luminance values is achieved by comparing the 9\n luminance values to a threshold value (\"thr\") and then changing the\n luminance value of pelc1 if all of the values are above or all of the values\n are below the threshold.  The amount that the luminance value is changed\n depends on a weighted sum of the 9 luminance values. The position of Pelc1\n is then advanced to the right by one (as well as all of the surrounding pixels)\n and the same calculation is performed again for the luminance value of the new\n Pelc1. 
This continues row-wise until pixels in the last row of the block are\n filtered.\n\n\n------------------------------------------------------------------------------\n REQUIREMENTS\n\n None.\n\n------------------------------------------------------------------------------\n REFERENCES\n\n ..\\corelibs\\decoder\\common\\src\\post_proc.c\n\n------------------------------------------------------------------------------\n PSEUDO-CODE\n\n------------------------------------------------------------------------------\n RESOURCES USED\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] (see [filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n#include    \"mp4def.h\"\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. 
Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef PV_POSTPROC_ON\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nvoid AdaptiveSmooth_NoMMX(\n    uint8 *Rec_Y,       /* i/o  */\n    int y_start,        /* i    */\n    int x_start,        /* i    */\n    int y_blk_start,    /* i    */\n    int x_blk_start,    /* i    */\n    int thr,        /* i    */\n    int width,      /* i    */\n    int max_diff        /* i    */\n)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int  sign_v[15];\n    int sum_v[15];\n    int *sum_V_ptr;\n    int *sign_V_ptr;\n    uint8 pelu;\n    uint8 
pelc;\n    uint8 pell;\n    uint8 *pelp;\n    uint8 oldrow[15];\n    int  sum;\n    int sum1;\n    uint8 *Rec_Y_ptr;\n    int32  addr_v;\n    int row_cntr;\n    int col_cntr;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /*  first row\n    */\n    addr_v = (int32)(y_start + 1) * width;  /* y coord of 1st element in the row  /\n                     /containing pelc pixel /     */\n    Rec_Y_ptr = &Rec_Y[addr_v + x_start];  /* initializing pointer to\n                           /  pelc0 position  */\n    sum_V_ptr = &sum_v[0];  /* initializing pointer to 0th element of array\n                /   that will contain weighted sums of pixel\n                /   luminance values */\n    sign_V_ptr = &sign_v[0];  /*  initializing pointer to 0th element of\n                  /   array that will contain sums that indicate\n                  /    how many of the 9 pixels are above or below\n                  /    the threshold value (thr)    */\n    pelp = &oldrow[0];  /* initializing pointer to the 0th element of array\n                /    that will contain current values of pelc that\n                /   are saved and used as values of pelu when the\n                /   next row of pixels are filtered */\n\n    pelu = *(Rec_Y_ptr - width);  /* assigning value of pelu0 to pelu  */\n    *pelp++ = pelc = *Rec_Y_ptr; /* assigning value of pelc0 to pelc and\n                     /  storing this value in pelp which\n                     /   will be used as value of pelu0 when\n                     /  next row is filtered */\n    pell = *(Rec_Y_ptr + width);  /* assigning value of pell0 to pell */\n    Rec_Y_ptr++; /* advancing pointer from pelc0 to pelc1 */\n    *sum_V_ptr++ = pelu + (pelc << 1) + pell;  /* weighted sum of pelu0,\n                         /  pelc0 and pell0  */\n    /* sum of 0's and 1's (0 if pixel value is 
below thr, 1 if value\n    /is above thr)  */\n    *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);\n\n\n    pelu = *(Rec_Y_ptr - width);  /* assigning value of pelu1 to pelu */\n    *pelp++ = pelc = *Rec_Y_ptr; /* assigning value of pelc1 to pelc and\n                     /  storing this value in pelp which\n                     /  will be used as the value of pelu1 when\n                     /  next row is filtered */\n    pell = *(Rec_Y_ptr + width);  /* assigning value of pell1 to pell */\n    Rec_Y_ptr++;  /* advancing pointer from pelc1 to pelc2 */\n    *sum_V_ptr++ = pelu + (pelc << 1) + pell; /* weighted sum of pelu1,\n                        / pelc1 and pell1  */\n    /* sum of 0's and 1's (0 if pixel value is below thr, 1 if value\n    /is above thr)  */\n    *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) + INDEX(pell, thr);\n\n    /* The loop below performs the filtering for the first row of\n    /   pixels in the region.  It steps across the remaining pixels in\n    /   the row and alters the luminance value of pelc1 if necessary,\n    /   depending on the luminance values of the adjacent pixels*/\n\n    for (col_cntr = (x_blk_start + BLKSIZE - 1) - x_start; col_cntr > 0; col_cntr--)\n    {\n        pelu = *(Rec_Y_ptr - width);  /* assigning value of pelu2 to\n                        /   pelu */\n        *pelp++ = pelc = *Rec_Y_ptr; /* assigning value of pelc2 to pelc\n                         / and storing this value in pelp\n                         / which will be used   as value of pelu2\n                         / when next row is filtered */\n        pell = *(Rec_Y_ptr + width); /* assigning value of pell2 to pell */\n\n        /* weighted sum of pelu1, pelc1 and pell1  */\n        *sum_V_ptr = pelu + (pelc << 1) + pell;\n        /* sum of 0's and 1's (0 if pixel value is below thr,\n        /1 if value is above thr)  */\n        *sign_V_ptr = INDEX(pelu, thr) + INDEX(pelc, thr) +\n                      INDEX(pell, 
thr);\n        /* the value of sum1 indicates how many of the 9 pixels'\n        /luminance values are above or equal to thr */\n        sum1 = *(sign_V_ptr - 2) + *(sign_V_ptr - 1) + *sign_V_ptr;\n\n        /* alter the luminance value of pelc1 if all 9 luminance values\n        /are above or equal to thr or if all 9 values are below thr */\n        if (sum1 == 0 || sum1 == 9)\n        {\n            /* sum is a weighted average of the 9 pixel luminance\n            /values   */\n            sum = (*(sum_V_ptr - 2) + (*(sum_V_ptr - 1) << 1) +\n                   *sum_V_ptr + 8) >> 4;\n\n            Rec_Y_ptr--;  /* move pointer back to pelc1  */\n            /* If luminance value of pelc1 is larger than\n            / sum by more than max_diff, then subract max_diff\n            / from luminance value of pelc1*/\n            if ((int)(*Rec_Y_ptr - sum) > max_diff)\n            {\n                sum = *Rec_Y_ptr - max_diff;\n            }\n            /* If luminance value of pelc1 is smaller than\n            / sum by more than max_diff, then add max_diff\n            / to luminance value of pelc1*/\n            else if ((int)(*Rec_Y_ptr - sum) < -max_diff)\n            {\n                sum = *Rec_Y_ptr + max_diff;\n            }\n            *Rec_Y_ptr++ = sum; /* assign value of sum to pelc1\n                         and advance pointer to pelc2 */\n        }\n        Rec_Y_ptr++; /* advance pointer to new value of pelc2\n                 /   old pelc2 is now treated as pelc1*/\n        sum_V_ptr++; /* pointer is advanced so next weighted sum may\n                 /  be saved */\n        sign_V_ptr++; /* pointer is advanced so next sum of 0's and\n                  / 1's may be saved  */\n    }\n\n    /* The nested loops below perform the filtering for the remaining rows */\n\n    addr_v = (y_start + 2) * width;  /* advance addr_v to the next row\n                     /   (corresponding to pell0)*/\n    /* The outer loop steps throught the rows.   
*/\n    for (row_cntr = (y_blk_start + BLKSIZE) - (y_start + 2); row_cntr > 0; row_cntr--)\n    {\n        Rec_Y_ptr = &Rec_Y[addr_v + x_start]; /* advance pointer to\n            /the old pell0, which has become the new pelc0 */\n        addr_v += width;  /* move addr_v down 1 row */\n        sum_V_ptr = &sum_v[0];  /* re-initializing pointer */\n        sign_V_ptr = &sign_v[0];  /* re-initilaizing pointer */\n        pelp = &oldrow[0]; /* re-initializing pointer */\n\n        pelu = *pelp; /* setting pelu0 to old value of pelc0 */\n        *pelp++ = pelc = *Rec_Y_ptr;\n        pell = *(Rec_Y_ptr + width);\n        Rec_Y_ptr++;\n        *sum_V_ptr++ = pelu + (pelc << 1) + pell;\n        *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) +\n                        INDEX(pell, thr);\n\n        pelu = *pelp; /* setting pelu1 to old value of pelc1 */\n        *pelp++ = pelc = *Rec_Y_ptr;\n        pell = *(Rec_Y_ptr + width);\n        Rec_Y_ptr++;\n        *sum_V_ptr++ = pelu + (pelc << 1) + pell;\n        *sign_V_ptr++ = INDEX(pelu, thr) + INDEX(pelc, thr) +\n                        INDEX(pell, thr);\n        /* The inner loop steps through the columns */\n        for (col_cntr = (x_blk_start + BLKSIZE - 1) - x_start; col_cntr > 0; col_cntr--)\n        {\n            pelu = *pelp; /* setting pelu2 to old value of pelc2 */\n            *pelp++ = pelc = *Rec_Y_ptr;\n            pell = *(Rec_Y_ptr + width);\n\n            *sum_V_ptr = pelu + (pelc << 1) + pell;\n            *sign_V_ptr = INDEX(pelu, thr) + INDEX(pelc, thr) +\n                          INDEX(pell, thr);\n\n            sum1 = *(sign_V_ptr - 2) + *(sign_V_ptr - 1) + *sign_V_ptr;\n            /* the \"if\" statement below is the same as the one in\n            / the first loop */\n            if (sum1 == 0 || sum1 == 9)\n            {\n                sum = (*(sum_V_ptr - 2) + (*(sum_V_ptr - 1) << 1) +\n                       *sum_V_ptr + 8) >> 4;\n\n                Rec_Y_ptr--;\n                if 
((int)(*Rec_Y_ptr - sum) > max_diff)\n                {\n                    sum = *Rec_Y_ptr - max_diff;\n                }\n                else if ((int)(*Rec_Y_ptr - sum) < -max_diff)\n                {\n                    sum = *Rec_Y_ptr + max_diff;\n                }\n                *Rec_Y_ptr++ = (uint8) sum;\n            }\n            Rec_Y_ptr++;\n            sum_V_ptr++;\n            sign_V_ptr++;\n        }\n    }\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/bitstream.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"bitstream.h\"\n#include \"mp4dec_lib.h\"\n\n\n#define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT\n#include \"osclconfig_compiler_warnings.h\"\n/* to mask the n least significant bits of an integer */\nstatic const uint32 msk[33] =\n{\n    0x00000000, 0x00000001, 0x00000003, 0x00000007,\n    0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,\n    0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,\n    0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff,\n    0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff,\n    0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff,\n    0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff,\n    0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff,\n    0xffffffff\n};\n\n\n/* ======================================================================== */\n/*  Function : BitstreamFillCache()                                         */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Read more bitstream data into buffer & the 24-byte cache.    */\n/*              This function is different from BitstreamFillBuffer in      */\n/*              that the buffer is the frame-based buffer provided by       */\n/*              the application.         
                                   */\n/*  In/out   :                                                              */\n/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.                  */\n/*  Modified : 4/16/01  : removed return of PV_END_OF_BUFFER                */\n/* ======================================================================== */\nPV_STATUS BitstreamFillCache(BitstreamDecVideo *stream)\n{\n    uint8 *bitstreamBuffer = stream->bitstreamBuffer;\n    uint8 *v;\n    int num_bits, i;\n\n    stream->curr_word |= (stream->next_word >> stream->incnt);   // stream->incnt cannot be 32\n    stream->next_word <<= (31 - stream->incnt);\n    stream->next_word <<= 1;\n    num_bits = stream->incnt_next + stream->incnt;\n    if (num_bits >= 32)\n    {\n        stream->incnt_next -= (32 - stream->incnt);\n        stream->incnt = 32;\n        return PV_SUCCESS;\n    }\n    /* this check can be removed if there is additional extra 4 bytes at the end of the bitstream */\n    v = bitstreamBuffer + stream->read_point;\n\n    if (stream->read_point > stream->data_end_pos - 4)\n    {\n        if (stream->data_end_pos <= stream->read_point)\n        {\n            stream->incnt = num_bits;\n            stream->incnt_next = 0;\n            return PV_SUCCESS;\n        }\n\n        stream->next_word = 0;\n\n        for (i = 0; i < stream->data_end_pos - stream->read_point; i++)\n        {\n            stream->next_word |= (v[i] << ((3 - i) << 3));\n        }\n\n        stream->read_point = stream->data_end_pos;\n        stream->curr_word |= (stream->next_word >> num_bits); // this is safe\n\n        stream->next_word <<= (31 - num_bits);\n        stream->next_word <<= 1;\n        num_bits = i << 3;\n        stream->incnt += stream->incnt_next;\n        stream->incnt_next = num_bits - (32 - stream->incnt);\n        if (stream->incnt_next < 0)\n        {\n            stream->incnt +=  num_bits;\n            stream->incnt_next = 0;\n        }\n        else\n        {\n   
         stream->incnt = 32;\n        }\n        return PV_SUCCESS;\n    }\n\n    stream->next_word = ((uint32)v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3];\n    stream->read_point += 4;\n\n    stream->curr_word |= (stream->next_word >> num_bits); // this is safe\n    stream->next_word <<= (31 - num_bits);\n    stream->next_word <<= 1;\n    stream->incnt_next += stream->incnt;\n    stream->incnt = 32;\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamReset()                                             */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Initialize the bitstream buffer for frame-based decoding.    */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid BitstreamReset(BitstreamDecVideo *stream, uint8 *buffer, int32 buffer_size)\n{\n    /* set up frame-based bitstream buffer */\n    oscl_memset(stream, 0, sizeof(BitstreamDecVideo));\n    stream->data_end_pos = buffer_size;\n    stream->bitstreamBuffer = buffer;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamOpen()                                              */\n/*  Purpose  : Initialize the bitstream data structure.                     
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint BitstreamOpen(BitstreamDecVideo *stream, int layer)\n{\n    OSCL_UNUSED_ARG(layer);\n    int buffer_size = 0;\n    /* set up linear bitstream buffer */\n//  stream->currentBytePos = 0;\n    stream->data_end_pos = 0;\n\n    stream->incnt = 0;\n    stream->incnt_next = 0;\n    stream->bitcnt = 0;\n    stream->curr_word = stream->next_word = 0;\n    stream->read_point = stream->data_end_pos;\n    return buffer_size;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamClose()                                             */\n/*  Purpose  : Cleanup the bitstream data structure.                        */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid BitstreamClose(BitstreamDecVideo * stream)\n{\n    OSCL_UNUSED_ARG(stream);\n    return;\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- BitstreamShowBits32HC\n* Shows 32 bits\n***********************************************************CommentEnd********/\n\nPV_STATUS BitstreamShowBits32HC(BitstreamDecVideo *stream, uint32 *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    if (stream->incnt < 32)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word;\n    return 
status;\n}\n\n/***********************************************************CommentBegin******\n*\n* -- BitstreamShowBits32\n* Shows upto and including 31 bits\n***********************************************************CommentEnd********/\nPV_STATUS BitstreamShowBits32(BitstreamDecVideo *stream, int nbits, uint32 *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    if (stream->incnt < nbits)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word >> (32 - nbits);\n    return status;\n}\n\n\n#ifndef PV_BS_INLINE\n/*========================================================================= */\n/*  Function:   BitstreamShowBits16()                                       */\n/*  Date:       12/18/2000                                                  */\n/*  Purpose:    To see the next \"nbits\"(nbits<=16) bitstream bits           */\n/*              without advancing the read pointer                          */\n/*                                                                          */\n/* =========================================================================*/\nPV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n\n    if (stream->incnt < nbits)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n\n    *code = stream->curr_word >> (32 - nbits);\n    return status;\n}\n\n\n/*========================================================================= */\n/*  Function:   BitstreamShow15Bits()                                       */\n/*  Date:       01/23/2001                                                  */\n/*  Purpose:    To see the next 15 bitstream bits                           */\n/*              without advancing the read pointer                          */\n/*                                                                          */\n/* 
=========================================================================*/\nPV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    if (stream->incnt < 15)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word >> 17;\n    return status;\n}\n/*========================================================================= */\n/*  Function: BitstreamShow13Bits                                           */\n/*  Date:       050923                                              */\n/*  Purpose:    Faciliate and speed up showing 13 bit from bitstream        */\n/*              used in VlcTCOEFF decoding                                  */\n/*  Modified:                            */\n/* =========================================================================*/\nPV_STATUS BitstreamShow13Bits(BitstreamDecVideo *stream, uint *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    if (stream->incnt < 13)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    *code = stream->curr_word >> 19;\n    return status;\n}\n\nuint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits)\n{\n    uint code;\n    PV_STATUS status;\n\n    if (stream->incnt < nbits)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    code = stream->curr_word >> (32 - nbits);\n    PV_BitstreamFlushBits(stream, nbits);\n    return code;\n}\n\n\nuint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint    code;\n\n\n    if (stream->incnt < 1)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n    code = stream->curr_word >> 31;\n    PV_BitstreamFlushBits(stream, 1);\n\n    return code;\n}\n\n#endif\n\n/* ======================================================================== */\n/*  Function : 
BitstreamReadBits16()                                        */\n/*  Purpose  : Read bits (nbits <=16) from bitstream buffer.                */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/* ======================================================================== */\nuint BitstreamReadBits16(BitstreamDecVideo *stream, int nbits)\n{\n    uint code;\n\n    if (stream->incnt < nbits)\n    {\n        /* frame-based decoding */\n        BitstreamFillCache(stream);\n    }\n    code = stream->curr_word >> (32 - nbits);\n    PV_BitstreamFlushBits(stream, nbits);\n    return code;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamRead1Bits()                                         */\n/*  Date     : 10/23/2000                                                   */\n/*  Purpose  : Faciliate and speed up reading 1 bit from bitstream.         */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/* ======================================================================== */\n\nuint BitstreamRead1Bits(BitstreamDecVideo *stream)\n{\n    uint    code;\n\n    if (stream->incnt < 1)\n    {\n        /* frame-based decoding */\n        BitstreamFillCache(stream);\n    }\n    code = stream->curr_word >> 31;\n    PV_BitstreamFlushBits(stream, 1);\n\n    return code;\n}\n\n/* ======================================================================== */\n/*  Function : PV_BitstreamFlushBitsCheck()                                 */\n/*  Purpose  : Flush nbits bits from bitstream buffer. 
Check for cache      */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS PV_BitstreamFlushBitsCheck(BitstreamDecVideo *stream, int nbits)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    stream->bitcnt += nbits;\n    stream->incnt -= nbits;\n    if (stream->incnt < 0)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n\n        if (stream->incnt < 0)\n        {\n            stream->bitcnt += stream->incnt;\n            stream->incnt = 0;\n        }\n    }\n    stream->curr_word <<= nbits;\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamReadBits32()                                        */\n/*  Purpose  : Read bits from bitstream buffer.                             
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/* ======================================================================== */\nuint32 BitstreamReadBits32(BitstreamDecVideo *stream, int nbits)\n{\n    uint32 code;\n\n    if (stream->incnt < nbits)\n    {\n        /* frame-based decoding */\n        BitstreamFillCache(stream);\n    }\n    code = stream->curr_word >> (32 - nbits);\n    PV_BitstreamFlushBits(stream, nbits);\n    return code;\n}\n\nuint32 BitstreamReadBits32HC(BitstreamDecVideo *stream)\n{\n    uint32 code;\n\n    BitstreamShowBits32HC(stream, &code);\n    stream->bitcnt += 32;\n    stream->incnt = 0;\n    stream->curr_word = 0;\n    return code;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamCheckEndBuffer()                                    */\n/*  Date     : 03/30/2001                                                   */\n/*  Purpose  : Check to see if we are at the end of buffer                  */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamCheckEndBuffer(BitstreamDecVideo *stream)\n{\n    if (stream->read_point >= stream->data_end_pos && stream->incnt <= 0) return PV_END_OF_VOP;\n    return PV_SUCCESS;\n}\n\n\nPV_STATUS PV_BitstreamShowBitsByteAlign(BitstreamDecVideo *stream, int nbits, uint32 *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    int n_stuffed;\n\n    n_stuffed = 8 - (stream->bitcnt & 0x7); /*  07/05/01 */\n\n    if (stream->incnt < (nbits + n_stuffed))\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n\n    *code = 
(stream->curr_word << n_stuffed) >> (32 - nbits);\n    return status;\n}\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\nPV_STATUS PV_BitstreamShowBitsByteAlignNoForceStuffing(BitstreamDecVideo *stream, int nbits, uint32 *code)\n{\n    PV_STATUS status = PV_SUCCESS;\n\n    int n_stuffed;\n\n    n_stuffed = (8 - (stream->bitcnt & 0x7)) & 7;\n\n    if (stream->incnt < (nbits + n_stuffed))\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n\n    *code = (stream->curr_word << n_stuffed) >> (32 - nbits);\n    return status;\n}\n#endif\n\nPV_STATUS PV_BitstreamByteAlign(BitstreamDecVideo *stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    int n_stuffed;\n\n    n_stuffed = 8 - (stream->bitcnt & 0x7); /*  07/05/01 */\n\n    /* We have to make sure we have enough bits in the cache.   08/15/2000 */\n    if (stream->incnt < n_stuffed)\n    {\n        /* frame-based decoding */\n        status = BitstreamFillCache(stream);\n    }\n\n\n    stream->bitcnt += n_stuffed;\n    stream->incnt -= n_stuffed;\n    stream->curr_word <<= n_stuffed;\n    if (stream->incnt < 0)\n    {\n        stream->bitcnt += stream->incnt;\n        stream->incnt = 0;\n    }\n    return status;\n}\n\n\nPV_STATUS BitstreamByteAlignNoForceStuffing(BitstreamDecVideo *stream)\n{\n    uint n_stuffed;\n\n    n_stuffed = (8 - (stream->bitcnt & 0x7)) & 0x7; /*  07/05/01 */\n\n    stream->bitcnt += n_stuffed;\n    stream->incnt -= n_stuffed;\n\n    if (stream->incnt < 0)\n    {\n        stream->bitcnt += stream->incnt;\n        stream->incnt = 0;\n    }\n    stream->curr_word <<= n_stuffed;\n    return PV_SUCCESS;\n}\n\n\n/* ==================================================================== */\n/*  Function : getPointer()                                             */\n/*  Date     : 10/98                                                    */\n/*  Purpose  : get current position of file pointer                     */\n/*  In/out   :                                                  
        */\n/*  Return   :                                                          */\n/* ==================================================================== */\nint32 getPointer(BitstreamDecVideo *stream)\n{\n    return stream->bitcnt;\n}\n\n\n\n\n/* ====================================================================== /\nFunction : movePointerTo()\nDate     : 05/14/2004\nPurpose  : move bitstream pointer to a desired position\nIn/out   :\nReturn   :\nModified :\n/ ====================================================================== */\nPV_STATUS movePointerTo(BitstreamDecVideo *stream, int32 pos)\n{\n    int32 byte_pos;\n    if (pos < 0)\n    {\n        pos = 0;\n    }\n\n    byte_pos = pos >> 3;\n\n    if (byte_pos > stream->data_end_pos)\n    {\n        byte_pos = stream->data_end_pos;\n    }\n\n    stream->read_point = byte_pos & -4;\n    stream->bitcnt = stream->read_point << 3;;\n    stream->curr_word = 0;\n    stream->next_word = 0;\n    stream->incnt = 0;\n    stream->incnt_next = 0;\n    BitstreamFillCache(stream);\n    PV_BitstreamFlushBits(stream, ((pos & 0x7) + ((byte_pos & 0x3) << 3)));\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : validStuffing()                                              */\n/*  Date     : 04/11/2000                                                   */\n/*  Purpose  : Check whether we have valid stuffing at current position.    */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified : 12/18/2000 : changed the pattern type to uint    */\n/*             04/01/2001 : removed PV_END_OF_BUFFER                        */\n/* ======================================================================== */\nBool validStuffing(BitstreamDecVideo *stream)\n{\n    uint n_stuffed;\n    uint pattern;\n\n\n    n_stuffed = 8 - (stream->bitcnt & 0x7);\n    BitstreamShowBits16(stream, n_stuffed, &pattern);\n    if (pattern == msk[n_stuffed-1]) return PV_TRUE;\n    return PV_FALSE;\n}\n#ifdef PV_ANNEX_IJKT_SUPPORT\nBool validStuffing_h263(BitstreamDecVideo *stream)\n{\n    uint n_stuffed;\n    uint pattern;\n\n\n    n_stuffed = (8 - (stream->bitcnt & 0x7)) & 7;  //  stream->incnt % 8\n    if (n_stuffed == 0)\n    {\n        return PV_TRUE;\n    }\n    BitstreamShowBits16(stream, n_stuffed, &pattern);\n    if (pattern == 0) return PV_TRUE;\n    return PV_FALSE;\n}\n#endif\n\n\n/* ======================================================================== */\n/*  Function : PVSearchNextH263Frame()                                      */\n/*  Date     : 04/08/2005                                                   */\n/*  Purpose  : search for 0x00 0x00 0x80                                    */\n/*  In/out   :                                                              */\n/*  Return   : PV_SUCCESS if succeeded  or PV_END_OF_VOP if failed          */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS PVSearchNextH263Frame(BitstreamDecVideo *stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint8 *ptr;\n    int32 i;\n    int32 initial_byte_aligned_position = (stream->bitcnt + 7) >> 3;\n\n    ptr = stream->bitstreamBuffer + initial_byte_aligned_position;\n\n    i = PVLocateH263FrameHeader(ptr, stream->data_end_pos - initial_byte_aligned_position);\n    if (stream->data_end_pos <= initial_byte_aligned_position + i)\n    {\n        status = 
PV_END_OF_VOP;\n    }\n    (void)movePointerTo(stream, ((i + initial_byte_aligned_position) << 3)); /* ptr + i */\n    return status;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVSearchNextM4VFrame()                                       */\n/*  Date     : 04/08/2005                                                   */\n/*  Purpose  : search for 0x00 0x00 0x01 and move the pointer to the        */\n/*  beginning of the start code                                             */\n/*  In/out   :                                                              */\n/*  Return   : PV_SUCCESS if succeeded  or PV_END_OF_VOP if failed          */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS PVSearchNextM4VFrame(BitstreamDecVideo *stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint8 *ptr;\n    int32 i;\n    int32 initial_byte_aligned_position = (stream->bitcnt + 7) >> 3;\n\n    ptr = stream->bitstreamBuffer + initial_byte_aligned_position;\n\n    i = PVLocateFrameHeader(ptr, stream->data_end_pos - initial_byte_aligned_position);\n    if (stream->data_end_pos <= initial_byte_aligned_position + i)\n    {\n        status = PV_END_OF_VOP;\n    }\n    (void)movePointerTo(stream, ((i + initial_byte_aligned_position) << 3)); /* ptr + i */\n    return status;\n}\n\n\n\nvoid PVLocateM4VFrameBoundary(BitstreamDecVideo *stream)\n{\n    uint8 *ptr;\n    int32 byte_pos = (stream->bitcnt >> 3);\n\n    stream->searched_frame_boundary = 1;\n    ptr = stream->bitstreamBuffer + byte_pos;\n\n    stream->data_end_pos = PVLocateFrameHeader(ptr, (int32)stream->data_end_pos - byte_pos) + byte_pos;\n}\n\nvoid PVLocateH263FrameBoundary(BitstreamDecVideo *stream)\n{\n    uint8 *ptr;\n    int32 byte_pos = (stream->bitcnt >> 3);\n\n    stream->searched_frame_boundary = 1;\n    ptr = stream->bitstreamBuffer + 
byte_pos;\n\n    stream->data_end_pos = PVLocateH263FrameHeader(ptr, (int32)stream->data_end_pos - byte_pos) + byte_pos;\n}\n\n/* ======================================================================== */\n/*  Function : quickSearchVideoPacketHeader()               */\n/*  Date     : 05/08/2000                           */\n/*  Purpose  : Quick search for the next video packet header        */\n/*  In/out   :                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.            */\n/*  Modified :                              */\n/* ======================================================================== */\nPV_STATUS quickSearchVideoPacketHeader(BitstreamDecVideo *stream, int marker_length)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint32 tmpvar;\n\n\n    if (stream->searched_frame_boundary == 0)\n    {\n        PVLocateM4VFrameBoundary(stream);\n    }\n\n    do\n    {\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) break;\n        PV_BitstreamShowBitsByteAlign(stream, marker_length, &tmpvar);\n        if (tmpvar == RESYNC_MARKER) break;\n        PV_BitstreamFlushBits(stream, 8);\n    }\n    while (status == PV_SUCCESS);\n\n    return status;\n}\n#ifdef PV_ANNEX_IJKT_SUPPORT\nPV_STATUS quickSearchH263SliceHeader(BitstreamDecVideo *stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint32 tmpvar;\n\n\n    if (stream->searched_frame_boundary == 0)\n    {\n        PVLocateH263FrameBoundary(stream);\n    }\n\n    do\n    {\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) break;\n        PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar);\n        if (tmpvar == RESYNC_MARKER) break;\n        PV_BitstreamFlushBits(stream, 8);\n    }\n    while (status == PV_SUCCESS);\n\n    return status;\n}\n#endif\n/* ======================================================================== */\n/*          The following functions are for Error Concealment.   
           */\n/* ======================================================================== */\n\n/****************************************************/\n//  01/22/99 Quick search of Resync Marker\n// (actually the first part of it, i.e. 16 0's and a 1.\n\n/* We are not using the fastest algorithm possible. What this function does is\nto locate 11 consecutive 0's and then check if the 5 bits before them and\nthe 1 bit after them are all 1's.\n*/\n\n//  Table used for quick search of markers. Gives the last `1' in\n// 4 bits. The MSB is bit #1, the LSB is bit #4.\nconst int lastOne[] =\n{\n    0,  4,  3,  4,  2,  4,  3,  4,\n    1,  4,  3,  4,  2,  4,  3,  4\n};\n\n//  Table used for quick search of markers. Gives the last `0' in\n// 4 bits. The MSB is bit #1, the LSB is bit #4.\n/*const int lastZero[]=\n{\n    4,  3,  4,  2,  4,  3,  4,  1,\n        4,  3,  4,  2,  4,  3,  4,  0\n};\n*/\n//  Table used for quick search of markers. Gives the first `0' in\n// 4 bits. The MSB is bit #1, the LSB is bit #4.\nconst int firstZero[] =\n{\n    1, 1, 1, 1, 1, 1, 1, 1,\n    2, 2, 2, 2, 3, 3, 4, 0\n};\n\n//  Table used for quick search of markers. Gives the first `1' in\n// 4 bits. 
The MSB is bit #1, the LSB is bit #4.\nconst int firstOne[] =\n{\n    0, 4, 3, 3, 2, 2, 2, 2,\n    1, 1, 1, 1, 1, 1, 1, 1\n};\n\n\n/* ======================================================================== */\n/*  Function : quickSearchMarkers()                                         */\n/*  Date     : 01/25/99                                                     */\n/*  Purpose  : Quick search for Motion marker                               */\n/*  In/out   :                                                              */\n/*  Return   : Boolean true of false                                        */\n/*  Modified : 12/18/2000 : 32-bit version                    */\n/* ======================================================================== */\nPV_STATUS quickSearchMotionMarker(BitstreamDecVideo *stream)\n// MM: (11111000000000001)\n{\n    PV_STATUS status;\n    uint32 tmpvar, tmpvar2;\n\n    if (stream->searched_frame_boundary == 0)\n    {\n        PVLocateM4VFrameBoundary(stream);\n    }\n\n    while (TRUE)\n    {\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;\n\n        BitstreamShowBits32(stream, 17, &tmpvar);\n        if (!tmpvar) return PV_FAIL;\n\n        if (tmpvar & 1) //  Check if the 17th bit from the curr bit pos is a '1'\n        {\n            if (tmpvar == MOTION_MARKER_COMB)\n            {\n                return PV_SUCCESS; //  Found\n            }\n            else\n            {\n                tmpvar >>= 1;\n                tmpvar &= 0xF;\n                PV_BitstreamFlushBits(stream, (int)(12 + firstZero[tmpvar]));\n            }\n        }\n        else\n        {\n            //  01/25/99 Get the first 16 bits\n            tmpvar >>= 1;\n            tmpvar2 = tmpvar & 0xF;\n\n            //  01/26/99 Check bits #13 ~ #16\n            if (tmpvar2)\n            {\n                PV_BitstreamFlushBits(stream, (int)(7 + lastOne[tmpvar2]));\n            }\n            else\n       
     {\n                tmpvar >>= 4;\n                tmpvar2 = tmpvar & 0xF;\n\n                //  01/26/99 Check bits #9 ~ #12\n                if (tmpvar2)\n                {\n                    PV_BitstreamFlushBits(stream, (int)(3 + lastOne[tmpvar2]));\n                }\n                else\n                {\n                    tmpvar >>= 4;\n                    tmpvar2 = tmpvar & 0xF;\n\n                    //  01/26/99 Check bits #5 ~ #8\n                    // We don't need to check further\n                    // for the first 5 bits should be all 1's\n                    if (lastOne[tmpvar2] < 2)\n                    {\n                        /* we already have too many consecutive 0's. */\n                        /* Go directly pass the last of the 17 bits. */\n                        PV_BitstreamFlushBits(stream, 17);\n                    }\n                    else\n                    {\n                        PV_BitstreamFlushBits(stream, (int)(lastOne[tmpvar2] - 1));\n                    }\n                }\n            }\n        }\n\n    }\n}\n\n/* ======================================================================== */\n/*  Function : quickSearchDCM()                                             */\n/*  Date     : 01/22/99                                                     */\n/*  Purpose  : Quick search for DC Marker                                   */\n/*              We are not using the fastest algorithm possible.  What this */\n/*              function does is to locate 11 consecutive 0's and then      */\n/*              check if the 7 bits before them and the 1 bit after them    */\n/*              are correct.  (actually the first part of it, i.e. 16 0's   */\n/*              and a 1.                                                    
*/\n/*  In/out   :                                                              */\n/*  Return   : Boolean true of false                                        */\n/*  Modified : 12/18/2000 : 32-bit version                    */\n/* ======================================================================== */\nPV_STATUS quickSearchDCM(BitstreamDecVideo *stream)\n// DCM: (110 1011 0000 0000 0001)\n{\n    PV_STATUS status;\n    uint32 tmpvar, tmpvar2;\n\n    if (stream->searched_frame_boundary == 0)\n    {\n        PVLocateM4VFrameBoundary(stream);\n    }\n\n    while (TRUE)\n    {\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;\n        BitstreamShowBits32(stream, 19, &tmpvar);\n\n        if (tmpvar & 1) //  Check if the 17th bit from the curr bit pos is a '1'\n        {\n            if (tmpvar == DC_MARKER)\n            {\n                return PV_SUCCESS; //  Found\n            }\n            else\n            {\n                //  01/25/99 We treat the last of the 19 bits as its 7th bit (which is\n                // also a `1'\n                PV_BitstreamFlushBits(stream, 12);\n            }\n        }\n        else\n        {\n            tmpvar >>= 1;\n            tmpvar2 = tmpvar & 0xF;\n\n            if (tmpvar2)\n            {\n                PV_BitstreamFlushBits(stream, (int)(7 + lastOne[tmpvar2]));\n            }\n            else\n            {\n                tmpvar >>= 4;\n                tmpvar2 = tmpvar & 0xF;\n                if (tmpvar2)\n                {\n                    PV_BitstreamFlushBits(stream, (int)(3 + lastOne[tmpvar2]));\n                }\n                else\n                {\n                    tmpvar >>= 4;\n                    tmpvar2 = tmpvar & 0xF;\n                    if (lastOne[tmpvar2] < 2)\n                    {\n                        /* we already have too many consecutive 0's. */\n                        /* Go directly pass the last of the 17 bits. 
*/\n                        PV_BitstreamFlushBits(stream, 19);\n                    }\n                    else\n                    {\n                        PV_BitstreamFlushBits(stream, (int)(lastOne[tmpvar2] - 1));\n                    }\n                }\n            }\n        }\n    }\n}\n\n/* ======================================================================== */\n/*  Function : quickSearchGOBHeader()   0000 0000 0000 0000 1               */\n/*  Date     : 07/06/01                                                     */\n/*  Purpose  : Quick search of GOBHeader (not byte aligned)                 */\n/*  In/out   :                                                              */\n/*  Return   : Integer value indicates type of marker found                 */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS quickSearchGOBHeader(BitstreamDecVideo *stream)\n{\n    PV_STATUS status;\n    int byte0, byte1, byte2, shift, tmpvar;\n\n    BitstreamByteAlignNoForceStuffing(stream);\n\n    if (stream->searched_frame_boundary == 0)\n    {\n        PVLocateH263FrameBoundary(stream);\n    }\n\n    while (TRUE)\n    {\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) return PV_END_OF_VOP;\n\n        if (stream->incnt < 24)\n        {\n            status = BitstreamFillCache(stream);\n        }\n\n\n        byte1 = (stream->curr_word << 8) >> 24;\n        if (byte1 == 0)\n        {\n            byte2 = (stream->curr_word << 16) >> 24;\n            if (byte2)\n            {\n                tmpvar = byte2 >> 4;\n\n                if (tmpvar)\n                {\n                    shift = 9 - firstOne[tmpvar];\n                }\n                else\n                {\n                    shift = 5 - firstOne[byte2];\n                }\n                byte0 = stream->curr_word >> 24;\n                if ((byte0 
& msk[shift]) == 0)\n                {\n                    PV_BitstreamFlushBits(stream, 8 - shift);\n                    return PV_SUCCESS;\n                }\n                PV_BitstreamFlushBits(stream, 8);    /* third_byte is not zero */\n            }\n        }\n\n        PV_BitstreamFlushBits(stream, 8);\n    }\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/bitstream.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef _BITSTREAM_D_H_\n#define _BITSTREAM_D_H_\n\n#include \"mp4dec_lib.h\" /* video decoder function prototypes */\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif /* __cplusplus */\n\n#define PV_BS_INLINE  /* support inline bitstream functions */\n\n#define PV_BitstreamFlushBits(A,B)  {(A)->bitcnt += (B); (A)->incnt -= (B); (A)->curr_word <<= (B);}\n\n    PV_STATUS BitstreamFillBuffer(BitstreamDecVideo *stream);\n    PV_STATUS BitstreamFillCache(BitstreamDecVideo *stream);\n    void BitstreamReset(BitstreamDecVideo *stream, uint8 *buffer, int32 buffer_size);\n    int BitstreamOpen(BitstreamDecVideo *stream, int layer);\n    void BitstreamClose(BitstreamDecVideo *stream);\n\n    PV_STATUS BitstreamShowBits32(BitstreamDecVideo *stream, int nbits, uint32 *code);\n    uint32 BitstreamReadBits32(BitstreamDecVideo *stream, int nbits);\n\n    uint BitstreamReadBits16(BitstreamDecVideo *stream, int nbits);\n    uint BitstreamRead1Bits(BitstreamDecVideo *stream);\n#ifndef PV_BS_INLINE\n    PV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code);\n    PV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code);\n    PV_STATUS BitstreamShow13Bits(BitstreamDecVideo 
*stream, uint *code);\n    uint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits);\n    uint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream);\n#else\n    __inline PV_STATUS BitstreamShowBits16(BitstreamDecVideo *stream, int nbits, uint *code)\n    {\n        PV_STATUS status = PV_SUCCESS;\n\n\n        if (stream->incnt < nbits)\n        {\n            /* frame-based decoding */\n            status = BitstreamFillCache(stream);\n        }\n\n        *code = stream->curr_word >> (32 - nbits);\n        return status;\n    }\n\n\n\n    /* =========================================================================*/\n    __inline PV_STATUS BitstreamShow15Bits(BitstreamDecVideo *stream, uint *code)\n    {\n        PV_STATUS status = PV_SUCCESS;\n\n        if (stream->incnt < 15)\n        {\n            /* frame-based decoding */\n            status = BitstreamFillCache(stream);\n        }\n        *code = stream->curr_word >> 17;\n        return status;\n    }\n\n\n    __inline PV_STATUS BitstreamShow13Bits(BitstreamDecVideo *stream, uint *code)\n    {\n        PV_STATUS status = PV_SUCCESS;\n\n        if (stream->incnt < 13)\n        {\n            /* frame-based decoding */\n            status = BitstreamFillCache(stream);\n        }\n        *code = stream->curr_word >> 19;\n        return status;\n    }\n    __inline uint BitstreamReadBits16_INLINE(BitstreamDecVideo *stream, int nbits)\n    {\n        uint code;\n\n        if (stream->incnt < nbits)\n        {\n            /* frame-based decoding */\n            BitstreamFillCache(stream);\n        }\n        code = stream->curr_word >> (32 - nbits);\n        PV_BitstreamFlushBits(stream, nbits);\n        return code;\n    }\n\n\n    __inline uint BitstreamRead1Bits_INLINE(BitstreamDecVideo *stream)\n    {\n        uint    code;\n\n        if (stream->incnt < 1)\n        {\n            /* frame-based decoding */\n            BitstreamFillCache(stream);\n        }\n        code = stream->curr_word >> 
31;\n        PV_BitstreamFlushBits(stream, 1);\n\n        return code;\n    }\n\n#endif\n\n\n\n\n\n\n\n    PV_STATUS PV_BitstreamFlushBitsCheck(BitstreamDecVideo *stream, int nbits);\n\n    uint32 BitstreamReadBits32HC(BitstreamDecVideo *stream);\n    PV_STATUS BitstreamShowBits32HC(BitstreamDecVideo *stream, uint32 *code);\n\n\n\n    PV_STATUS BitstreamCheckEndBuffer(BitstreamDecVideo *stream);\n\n    PV_STATUS PV_BitstreamShowBitsByteAlign(BitstreamDecVideo *stream, int nbits, uint32 *code);\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    PV_STATUS PV_BitstreamShowBitsByteAlignNoForceStuffing(BitstreamDecVideo *stream, int nbits, uint32 *code);\n    Bool validStuffing_h263(BitstreamDecVideo *stream);\n    PV_STATUS quickSearchH263SliceHeader(BitstreamDecVideo *stream);\n#endif\n    PV_STATUS PV_BitstreamByteAlign(BitstreamDecVideo *stream);\n    PV_STATUS BitstreamByteAlignNoForceStuffing(BitstreamDecVideo *stream);\n    Bool validStuffing(BitstreamDecVideo *stream);\n\n    PV_STATUS movePointerTo(BitstreamDecVideo *stream, int32 pos);\n    PV_STATUS PVSearchNextM4VFrame(BitstreamDecVideo *stream);\n    PV_STATUS PVSearchNextH263Frame(BitstreamDecVideo *stream);\n    PV_STATUS quickSearchVideoPacketHeader(BitstreamDecVideo *stream, int marker_length);\n\n\n    /* for error concealment & soft-decoding */\n    void PVLocateM4VFrameBoundary(BitstreamDecVideo *stream);\n    void PVSearchH263FrameBoundary(BitstreamDecVideo *stream);\n\n    PV_STATUS quickSearchMotionMarker(BitstreamDecVideo *stream);\n    PV_STATUS quickSearchDCM(BitstreamDecVideo *stream);\n    PV_STATUS quickSearchGOBHeader(BitstreamDecVideo *stream);\n    void BitstreamShowBuffer(BitstreamDecVideo *stream, int32 startbit, int32 endbit, uint8 *bitBfr);\n\n    /*  10/8/98 New prototyps. */\n    int32 getPointer(BitstreamDecVideo *stream);\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus  */\n\n#endif /* _BITSTREAM_D_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/block_idct.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    [input_variable_name] = [description of the input to module, its type\n                 definition, and length (when applicable)]\n\n Local Stores/Buffers/Pointers Needed:\n    [local_store_name] = [description of the local store, its type\n                  definition, and length (when applicable)]\n    [local_buffer_name] = [description of the local buffer, its type\n                   definition, and length (when applicable)]\n    [local_ptr_name] = [description of the local pointer, its type\n                definition, and length (when applicable)]\n\n Global Stores/Buffers/Pointers Needed:\n    [global_store_name] = [description of the global store, its type\n                   definition, and length (when applicable)]\n    [global_buffer_name] = [description of the global buffer, its type\n                definition, and length (when applicable)]\n    [global_ptr_name] = [description of the global pointer, its type\n                 definition, and length (when applicable)]\n\n Outputs:\n    [return_variable_name] = [description of data/pointer returned\n        
          by module, its type definition, and length\n                  (when applicable)]\n\n Pointers and Buffers Modified:\n    [variable_bfr_ptr] points to the [describe where the\n      variable_bfr_ptr points to, its type definition, and length\n      (when applicable)]\n    [variable_bfr] contents are [describe the new contents of\n      variable_bfr]\n\n Local Stores Modified:\n    [local_store_name] = [describe new contents, its type\n                  definition, and length (when applicable)]\n\n Global Stores Modified:\n    [global_store_name] = [describe new contents, its type\n                   definition, and length (when applicable)]\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n------------------------------------------------------------------------------\n REQUIREMENTS\n\n------------------------------------------------------------------------------\n REFERENCES\n\n------------------------------------------------------------------------------\n PSEUDO-CODE\n\n------------------------------------------------------------------------------\n RESOURCES USED\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] (see [filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; 
INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"idct.h\"\n#include \"motion_comp.h\"\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n/* private prototypes */\nstatic void idctrow(int16 *blk, uint8 *pred, uint8 *dst, int width);\nstatic void idctrow_intra(int16 *blk, PIXEL *, int width);\nstatic void idctcol(int16 *blk);\n\n#ifdef FAST_IDCT\n// mapping from nz_coefs to functions to be used\n\n\n// ARM4 does not allow global data when they are not constant hence\n// an array of function pointers cannot be considered as array of constants\n// (actual addresses are only known when the dll is loaded).\n// So instead of arrays of function pointers, we'll store here\n// arrays of rows or columns and then call the idct function\n// corresponding to such the row/column number:\n\n\nstatic void (*const idctcolVCA[10][4])(int16*) =\n{\n    {&idctcol1, &idctcol0, &idctcol0, &idctcol0},\n    {&idctcol1, &idctcol1, &idctcol0, &idctcol0},\n    {&idctcol2, &idctcol1, &idctcol0, &idctcol0},\n    {&idctcol3, &idctcol1, &idctcol0, &idctcol0},\n    {&idctcol3, &idctcol2, &idctcol0, &idctcol0},\n    {&idctcol3, &idctcol2, &idctcol1, &idctcol0},\n    {&idctcol3, 
&idctcol2, &idctcol1, &idctcol1},\n    {&idctcol3, &idctcol2, &idctcol2, &idctcol1},\n    {&idctcol3, &idctcol3, &idctcol2, &idctcol1},\n    {&idctcol4, &idctcol3, &idctcol2, &idctcol1}\n};\n\n\nstatic void (*const idctrowVCA[10])(int16*, uint8*, uint8*, int) =\n{\n    &idctrow1,\n    &idctrow2,\n    &idctrow2,\n    &idctrow2,\n    &idctrow2,\n    &idctrow3,\n    &idctrow4,\n    &idctrow4,\n    &idctrow4,\n    &idctrow4\n};\n\n\nstatic void (*const idctcolVCA2[16])(int16*) =\n{\n    &idctcol0, &idctcol4, &idctcol3, &idctcol4,\n    &idctcol2, &idctcol4, &idctcol3, &idctcol4,\n    &idctcol1, &idctcol4, &idctcol3, &idctcol4,\n    &idctcol2, &idctcol4, &idctcol3, &idctcol4\n};\n\nstatic void (*const idctrowVCA2[8])(int16*, uint8*, uint8*, int) =\n{\n    &idctrow1, &idctrow4, &idctrow3, &idctrow4,\n    &idctrow2, &idctrow4, &idctrow3, &idctrow4\n};\n\nstatic void (*const idctrowVCA_intra[10])(int16*, PIXEL *, int) =\n{\n    &idctrow1_intra,\n    &idctrow2_intra,\n    &idctrow2_intra,\n    &idctrow2_intra,\n    &idctrow2_intra,\n    &idctrow3_intra,\n    &idctrow4_intra,\n    &idctrow4_intra,\n    &idctrow4_intra,\n    &idctrow4_intra\n};\n\nstatic void (*const idctrowVCA2_intra[8])(int16*, PIXEL *, int) =\n{\n    &idctrow1_intra, &idctrow4_intra, &idctrow3_intra, &idctrow4_intra,\n    &idctrow2_intra, &idctrow4_intra, &idctrow3_intra, &idctrow4_intra\n};\n#endif\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; 
EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nvoid MBlockIDCT(VideoDecData *video)\n{\n    Vop *currVop = video->currVop;\n    MacroBlock *mblock = video->mblock;\n    PIXEL *c_comp;\n    PIXEL *cu_comp;\n    PIXEL *cv_comp;\n    int x_pos = video->mbnum_col;\n    int y_pos = video->mbnum_row;\n    int width, width_uv;\n    int32 offset;\n    width = video->width;\n    width_uv = width >> 1;\n    offset = (int32)(y_pos << 4) * width + (x_pos << 4);\n\n    c_comp  = currVop->yChan + offset;\n    cu_comp = currVop->uChan + (offset >> 2) + (x_pos << 2);\n    cv_comp = currVop->vChan + (offset >> 2) + (x_pos << 2);\n\n    BlockIDCT_intra(mblock, c_comp, 0, width);\n    BlockIDCT_intra(mblock, c_comp + 8, 1, width);\n    BlockIDCT_intra(mblock, c_comp + (width << 3), 2, width);\n    BlockIDCT_intra(mblock, c_comp + (width << 3) + 8, 3, width);\n    BlockIDCT_intra(mblock, cu_comp, 4, width_uv);\n    BlockIDCT_intra(mblock, cv_comp, 5, width_uv);\n}\n\n\nvoid BlockIDCT_intra(\n    MacroBlock *mblock, PIXEL *c_comp, int comp, int width)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int16 *coeff_in = mblock->block[comp];\n#ifdef INTEGER_IDCT\n#ifdef FAST_IDCT  /* VCA IDCT using nzcoefs and bitmaps*/\n    int i, bmapr;\n    int nz_coefs = mblock->no_coeff[comp];\n    uint8 *bitmapcol = mblock->bitmapcol[comp];\n    uint8 bitmaprow = mblock->bitmaprow[comp];\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    
----------------------------------------------------------------------------*/\n    if (nz_coefs <= 10)\n    {\n        bmapr = (nz_coefs - 1);\n\n        (*(idctcolVCA[bmapr]))(coeff_in);\n        (*(idctcolVCA[bmapr][1]))(coeff_in + 1);\n        (*(idctcolVCA[bmapr][2]))(coeff_in + 2);\n        (*(idctcolVCA[bmapr][3]))(coeff_in + 3);\n\n        (*idctrowVCA_intra[nz_coefs-1])(coeff_in, c_comp, width);\n    }\n    else\n    {\n        i = 8;\n        while (i--)\n        {\n            bmapr = (int)bitmapcol[i];\n            if (bmapr)\n            {\n                if ((bmapr&0xf) == 0)         /*  07/18/01 */\n                {\n                    (*(idctcolVCA2[bmapr>>4]))(coeff_in + i);\n                }\n                else\n                {\n                    idctcol(coeff_in + i);\n                }\n            }\n        }\n        if ((bitmapcol[4] | bitmapcol[5] | bitmapcol[6] | bitmapcol[7]) == 0)\n        {\n            bitmaprow >>= 4;\n            (*(idctrowVCA2_intra[(int)bitmaprow]))(coeff_in, c_comp, width);\n        }\n        else\n        {\n            idctrow_intra(coeff_in, c_comp, width);\n        }\n    }\n#else\n    void idct_intra(int *block, uint8 *comp, int width);\n    idct_intra(coeff_in, c_comp, width);\n#endif\n#else\n    void idctref_intra(int *block, uint8 *comp, int width);\n    idctref_intra(coeff_in, c_comp, width);\n#endif\n\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\n/*  08/04/05, no residue, just copy from pred to output */\nvoid Copy_Blk_to_Vop(uint8 *dst, uint8 *pred, int width)\n{\n    /* copy 4 bytes at a time */\n    width -= 4;\n    *((uint32*)dst) = *((uint32*)pred);\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = 
*((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n    *((uint32*)(dst += width)) = *((uint32*)(pred += 12));\n    *((uint32*)(dst += 4)) = *((uint32*)(pred += 4));\n\n    return ;\n}\n\n/*  08/04/05 compute IDCT and add prediction at the end  */\nvoid BlockIDCT(\n    uint8 *dst,  /* destination */\n    uint8 *pred, /* prediction block, pitch 16 */\n    int16   *coeff_in,  /* DCT data, size 64 */\n    int width, /* width of dst */\n    int nz_coefs,\n    uint8 *bitmapcol,\n    uint8 bitmaprow\n)\n{\n#ifdef INTEGER_IDCT\n#ifdef FAST_IDCT  /* VCA IDCT using nzcoefs and bitmaps*/\n    int i, bmapr;\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    if (nz_coefs <= 10)\n    {\n        bmapr = (nz_coefs - 1);\n        (*(idctcolVCA[bmapr]))(coeff_in);\n        (*(idctcolVCA[bmapr][1]))(coeff_in + 1);\n        (*(idctcolVCA[bmapr][2]))(coeff_in + 2);\n        (*(idctcolVCA[bmapr][3]))(coeff_in + 3);\n\n        (*idctrowVCA[nz_coefs-1])(coeff_in, pred, dst, width);\n        return ;\n    }\n    else\n    {\n        i = 8;\n\n        while (i--)\n        {\n            bmapr = (int)bitmapcol[i];\n            if (bmapr)\n            {\n                if ((bmapr&0xf) == 0)         /*  07/18/01 */\n                {\n                    (*(idctcolVCA2[bmapr>>4]))(coeff_in + i);\n            
    }\n                else\n                {\n                    idctcol(coeff_in + i);\n                }\n            }\n        }\n        if ((bitmapcol[4] | bitmapcol[5] | bitmapcol[6] | bitmapcol[7]) == 0)\n        {\n            (*(idctrowVCA2[bitmaprow>>4]))(coeff_in, pred, dst, width);\n        }\n        else\n        {\n            idctrow(coeff_in, pred, dst, width);\n        }\n        return ;\n    }\n#else // FAST_IDCT\n    void idct(int *block, uint8 *pred, uint8 *dst, int width);\n    idct(coeff_in, pred, dst, width);\n    return;\n#endif // FAST_IDCT\n#else // INTEGER_IDCT\n    void idctref(int *block, uint8 *pred, uint8 *dst, int width);\n    idctref(coeff_in, pred, dst, width);\n    return;\n#endif // INTEGER_IDCT\n\n}\n/*----------------------------------------------------------------------------\n;  End Function: block_idct\n----------------------------------------------------------------------------*/\n\n\n/****************************************************************************/\n\n/*\n------------------------------------------------------------------------------\n FUNCTION NAME: idctrow\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS FOR idctrow\n\n Inputs:\n    [input_variable_name] = [description of the input to module, its type\n                 definition, and length (when applicable)]\n\n Local Stores/Buffers/Pointers Needed:\n    [local_store_name] = [description of the local store, its type\n                  definition, and length (when applicable)]\n    [local_buffer_name] = [description of the local buffer, its type\n                   definition, and length (when applicable)]\n    [local_ptr_name] = [description of the local pointer, its type\n                definition, and length (when applicable)]\n\n Global Stores/Buffers/Pointers Needed:\n    [global_store_name] = [description of the global store, its type\n                   definition, and length (when 
applicable)]\n    [global_buffer_name] = [description of the global buffer, its type\n                definition, and length (when applicable)]\n    [global_ptr_name] = [description of the global pointer, its type\n                 definition, and length (when applicable)]\n\n Outputs:\n    [return_variable_name] = [description of data/pointer returned\n                  by module, its type definition, and length\n                  (when applicable)]\n\n Pointers and Buffers Modified:\n    [variable_bfr_ptr] points to the [describe where the\n      variable_bfr_ptr points to, its type definition, and length\n      (when applicable)]\n    [variable_bfr] contents are [describe the new contents of\n      variable_bfr]\n\n Local Stores Modified:\n    [local_store_name] = [describe new contents, its type\n                  definition, and length (when applicable)]\n\n Global Stores Modified:\n    [global_store_name] = [describe new contents, its type\n                   definition, and length (when applicable)]\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION FOR idctrow\n\n------------------------------------------------------------------------------\n REQUIREMENTS FOR idctrow\n\n------------------------------------------------------------------------------\n REFERENCES FOR idctrow\n\n------------------------------------------------------------------------------\n PSEUDO-CODE FOR idctrow\n\n------------------------------------------------------------------------------\n RESOURCES USED FOR idctrow\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK 
CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] (see [filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; Function Code FOR idctrow\n----------------------------------------------------------------------------*/\nvoid idctrow(\n    int16 *blk, uint8 *pred, uint8 *dst, int width\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* row (horizontal) IDCT\n    *\n    * 7                       pi         1 dst[k] = sum c[l] * src[l] * cos( -- *\n    * ( k + - ) * l ) l=0                      8          2\n    *\n    * where: c[0]    = 128 c[1..7] = 128*sqrt(2) */\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    width -= 4;\n    dst -= width;\n    pred -= 12;\n    blk -= 8;\n\n    while (i--)\n    {\n        x1 = (int32)blk[12] << 8;\n        blk[12] = 0;\n        x2 = blk[14];\n        blk[14] = 0;\n        x3 = blk[10];\n        blk[10] = 0;\n        x4 = blk[9];\n        blk[9] = 0;\n        x5 = blk[15];\n        blk[15] = 0;\n        x6 = blk[13];\n        blk[13] = 0;\n        x7 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        blk[0] = 0;   /* for proper rounding in the fourth stage */\n\n        /* first stage 
*/\n        x8 = W7 * (x4 + x5) + 4;\n        x4 = (x8 + (W1 - W7) * x4) >> 3;\n        x5 = (x8 - (W1 + W7) * x5) >> 3;\n        x8 = W3 * (x6 + x7) + 4;\n        x6 = (x8 - (W3 - W5) * x6) >> 3;\n        x7 = (x8 - (W3 + W5) * x7) >> 3;\n\n        /* second stage */\n        x8 = x0 + x1;\n        x0 -= x1;\n        x1 = W6 * (x3 + x2) + 4;\n        x2 = (x1 - (W2 + W6) * x2) >> 3;\n        x3 = (x1 + (W2 - W6) * x3) >> 3;\n        x1 = x4 + x6;\n        x4 -= x6;\n        x6 = x5 + x7;\n        x5 -= x7;\n\n        /* third stage */\n        x7 = x8 + x3;\n        x8 -= x3;\n        x3 = x0 + x2;\n        x0 -= x2;\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x4 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */\n\n        res = (x7 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x3 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x8 + x6) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */\n\n        res = (x8 - x6) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x4) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x3 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x7 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\nvoid 
idctrow_intra(\n    int16 *blk, PIXEL *comp, int width\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;\n    int i = 8;\n    int offset = width;\n    int32 word;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* row (horizontal) IDCT\n    *\n    * 7                       pi         1 dst[k] = sum c[l] * src[l] * cos( -- *\n    * ( k + - ) * l ) l=0                      8          2\n    *\n    * where: c[0]    = 128 c[1..7] = 128*sqrt(2) */\n    while (i--)\n    {\n        x1 = (int32)blk[4] << 8;\n        blk[4] = 0;\n        x2 = blk[6];\n        blk[6] = 0;\n        x3 = blk[2];\n        blk[2] = 0;\n        x4 = blk[1];\n        blk[1] = 0;\n        x5 = blk[7];\n        blk[7] = 0;\n        x6 = blk[5];\n        blk[5] = 0;\n        x7 = blk[3];\n        blk[3] = 0;\n#ifndef FAST_IDCT\n        /* shortcut */  /* covered by idctrow1  01/9/2001 */\n        if (!(x1 | x2 | x3 | x4 | x5 | x6 | x7))\n        {\n            blk[0] = blk[1] = blk[2] = blk[3] = blk[4] = blk[5] = blk[6] = blk[7] = (blk[0] + 32) >> 6;\n            return;\n        }\n#endif\n        x0 = ((int32)blk[0] << 8) + 8192;\n        blk[0] = 0;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x8 = W7 * (x4 + x5) + 4;\n        x4 = (x8 + (W1 - W7) * x4) >> 3;\n        x5 = (x8 - (W1 + W7) * x5) >> 3;\n        x8 = W3 * (x6 + x7) + 4;\n        x6 = (x8 - (W3 - W5) * x6) >> 3;\n        x7 = (x8 - (W3 + W5) * x7) >> 3;\n\n        /* second stage */\n        x8 = x0 + x1;\n        x0 -= x1;\n        x1 = W6 * (x3 + x2) + 4;\n        x2 = (x1 - (W2 + W6) * x2) >> 3;\n        x3 = (x1 + (W2 - W6) * x3) >> 
3;\n        x1 = x4 + x6;\n        x4 -= x6;\n        x6 = x5 + x7;\n        x5 -= x7;\n\n        /* third stage */\n        x7 = x8 + x3;\n        x8 -= x3;\n        x3 = x0 + x2;\n        x0 -= x2;\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x4 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        word = ((x7 + x1) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x3 + x2) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n        temp = ((x0 + x4) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x8 + x6) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp)) = word;\n\n        word = ((x8 - x6) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x0 - x4) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n        temp = ((x3 - x2) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x7 - x1) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp + 4)) = word;\n        comp += offset;\n\n        blk += B_SIZE;\n    }\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\n/*----------------------------------------------------------------------------\n; End Function: idctrow\n----------------------------------------------------------------------------*/\n\n\n/****************************************************************************/\n\n/*\n------------------------------------------------------------------------------\n FUNCTION NAME: idctcol\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS FOR idctcol\n\n Inputs:\n    [input_variable_name] = [description of the input to module, its 
type\n                 definition, and length (when applicable)]\n\n Local Stores/Buffers/Pointers Needed:\n    [local_store_name] = [description of the local store, its type\n                  definition, and length (when applicable)]\n    [local_buffer_name] = [description of the local buffer, its type\n                   definition, and length (when applicable)]\n    [local_ptr_name] = [description of the local pointer, its type\n                definition, and length (when applicable)]\n\n Global Stores/Buffers/Pointers Needed:\n    [global_store_name] = [description of the global store, its type\n                   definition, and length (when applicable)]\n    [global_buffer_name] = [description of the global buffer, its type\n                definition, and length (when applicable)]\n    [global_ptr_name] = [description of the global pointer, its type\n                 definition, and length (when applicable)]\n\n Outputs:\n    [return_variable_name] = [description of data/pointer returned\n                  by module, its type definition, and length\n                  (when applicable)]\n\n Pointers and Buffers Modified:\n    [variable_bfr_ptr] points to the [describe where the\n      variable_bfr_ptr points to, its type definition, and length\n      (when applicable)]\n    [variable_bfr] contents are [describe the new contents of\n      variable_bfr]\n\n Local Stores Modified:\n    [local_store_name] = [describe new contents, its type\n                  definition, and length (when applicable)]\n\n Global Stores Modified:\n    [global_store_name] = [describe new contents, its type\n                   definition, and length (when applicable)]\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION FOR idctcol\n\n------------------------------------------------------------------------------\n REQUIREMENTS FOR idctcol\n\n------------------------------------------------------------------------------\n REFERENCES 
FOR idctcol\n\n------------------------------------------------------------------------------\n PSEUDO-CODE FOR idctcol\n\n------------------------------------------------------------------------------\n RESOURCES USED FOR idctcol\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] (see [filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; Function Code FOR idctcol\n----------------------------------------------------------------------------*/\nvoid idctcol(\n    int16 *blk\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* column (vertical) IDCT\n    *\n    * 7                         pi         1 dst[8*k] = sum c[l] * src[8*l] *\n    * cos( -- * ( k + - ) * l ) l=0                        8          2\n    *\n    * where: c[0]    = 1/1024 c[1..7] = (1/1024)*sqrt(2) */\n    x1 = (int32)blk[32] << 11;\n    x2 = blk[48];\n    x3 = blk[16];\n    x4 = 
blk[8];\n    x5 = blk[56];\n    x6 = blk[40];\n    x7 = blk[24];\n#ifndef FAST_IDCT\n    /* shortcut */        /* covered by idctcolumn1  01/9/2001 */\n    if (!(x1 | x2 | x3 | x4 | x5 | x6 | x7))\n    {\n        blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56]\n                                              = blk[0] << 3;\n        return;\n    }\n#endif\n\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    /* first stage */\n    x8 = W7 * (x4 + x5);\n    x4 = x8 + (W1 - W7) * x4;\n    x5 = x8 - (W1 + W7) * x5;\n    x8 = W3 * (x6 + x7);\n    x6 = x8 - (W3 - W5) * x6;\n    x7 = x8 - (W3 + W5) * x7;\n\n    /* second stage */\n    x8 = x0 + x1;\n    x0 -= x1;\n    x1 = W6 * (x3 + x2);\n    x2 = x1 - (W2 + W6) * x2;\n    x3 = x1 + (W2 - W6) * x3;\n    x1 = x4 + x6;\n    x4 -= x6;\n    x6 = x5 + x7;\n    x5 -= x7;\n\n    /* third stage */\n    x7 = x8 + x3;\n    x8 -= x3;\n    x3 = x0 + x2;\n    x0 -= x2;\n    x2 = (181 * (x4 + x5) + 128) >> 8;\n    x4 = (181 * (x4 - x5) + 128) >> 8;\n\n    /* fourth stage */\n    blk[0]    = (x7 + x1) >> 8;\n    blk[8] = (x3 + x2) >> 8;\n    blk[16] = (x0 + x4) >> 8;\n    blk[24] = (x8 + x6) >> 8;\n    blk[32] = (x8 - x6) >> 8;\n    blk[40] = (x0 - x4) >> 8;\n    blk[48] = (x3 - x2) >> 8;\n    blk[56] = (x7 - x1) >> 8;\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n/*----------------------------------------------------------------------------\n;  End Function: idctcol\n----------------------------------------------------------------------------*/\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/cal_dc_scaler.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    [input_variable_name] = [description of the input to module, its type\n                 definition, and length (when applicable)]\n\n Local Stores/Buffers/Pointers Needed:\n    [local_store_name] = [description of the local store, its type\n                  definition, and length (when applicable)]\n    [local_buffer_name] = [description of the local buffer, its type\n                   definition, and length (when applicable)]\n    [local_ptr_name] = [description of the local pointer, its type\n                definition, and length (when applicable)]\n\n Global Stores/Buffers/Pointers Needed:\n    [global_store_name] = [description of the global store, its type\n                   definition, and length (when applicable)]\n    [global_buffer_name] = [description of the global buffer, its type\n                definition, and length (when applicable)]\n    [global_ptr_name] = [description of the global pointer, its type\n                 definition, and length (when applicable)]\n\n Outputs:\n    [return_variable_name] = [description of data/pointer returned\n        
          by module, its type definition, and length\n                  (when applicable)]\n\n Pointers and Buffers Modified:\n    [variable_bfr_ptr] points to the [describe where the\n      variable_bfr_ptr points to, its type definition, and length\n      (when applicable)]\n    [variable_bfr] contents are [describe the new contents of\n      variable_bfr]\n\n Local Stores Modified:\n    [local_store_name] = [describe new contents, its type\n                  definition, and length (when applicable)]\n\n Global Stores Modified:\n    [global_store_name] = [describe new contents, its type\n                   definition, and length (when applicable)]\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This module calculates the DC quantization scale according\n to the incoming Q and type.\n\n------------------------------------------------------------------------------\n REQUIREMENTS\n\n [List requirements to be satisfied by this module.]\n\n------------------------------------------------------------------------------\n REFERENCES\n\n [List all references used in designing this module.]\n\n------------------------------------------------------------------------------\n PSEUDO-CODE\n\n------------------------------------------------------------------------------\n RESOURCES USED\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] 
(see [filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n#include    \"vlc_decode.h\"\n#include    \"bitstream.h\"\n#include    \"zigzag.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; 
FUNCTION CODE\n----------------------------------------------------------------------------*/\nint cal_dc_scaler(\n    int QP,\n    int type)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int dc_scaler;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    if (type == LUMINANCE_DC_TYPE)\n    {\n        if (QP > 0 && QP < 5) dc_scaler = 8;\n        else if (QP > 4 && QP < 9) dc_scaler = 2 * QP;\n        else if (QP > 8 && QP < 25) dc_scaler = QP + 8;\n        else dc_scaler = 2 * QP - 16;\n    }\n    else /* if (type == CHROMINANCE_DC_TYPE), there is no other types.  */\n    {\n        if (QP > 0 && QP < 5) dc_scaler = 8;\n        else if (QP > 4 && QP < 25) dc_scaler = (QP + 13) >> 1;\n        else dc_scaler = QP - 6;\n    }\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return dc_scaler;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/chv_filter.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    [input_variable_name] = [description of the input to module, its type\n                 definition, and length (when applicable)]\n\n Local Stores/Buffers/Pointers Needed:\n    [local_store_name] = [description of the local store, its type\n                  definition, and length (when applicable)]\n    [local_buffer_name] = [description of the local buffer, its type\n                   definition, and length (when applicable)]\n    [local_ptr_name] = [description of the local pointer, its type\n                definition, and length (when applicable)]\n\n Global Stores/Buffers/Pointers Needed:\n    [global_store_name] = [description of the global store, its type\n                   definition, and length (when applicable)]\n    [global_buffer_name] = [description of the global buffer, its type\n                definition, and length (when applicable)]\n    [global_ptr_name] = [description of the global pointer, its type\n                 definition, and length (when applicable)]\n\n Outputs:\n    [return_variable_name] = [description of data/pointer returned\n        
          by module, its type definition, and length\n                  (when applicable)]\n\n Pointers and Buffers Modified:\n    [variable_bfr_ptr] points to the [describe where the\n      variable_bfr_ptr points to, its type definition, and length\n      (when applicable)]\n    [variable_bfr] contents are [describe the new contents of\n      variable_bfr]\n\n Local Stores Modified:\n    [local_store_name] = [describe new contents, its type\n                  definition, and length (when applicable)]\n\n Global Stores Modified:\n    [global_store_name] = [describe new contents, its type\n                   definition, and length (when applicable)]\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n   For fast Deblock filtering\n   Newer version (macroblock based processing)\n\n------------------------------------------------------------------------------\n REQUIREMENTS\n\n [List requirements to be satisfied by this module.]\n\n------------------------------------------------------------------------------\n REFERENCES\n\n [List all references used in designing this module.]\n\n------------------------------------------------------------------------------\n PSEUDO-CODE\n\n------------------------------------------------------------------------------\n RESOURCES USED\n   When the code is written for a specific target processor the\n     the resources used should be documented below.\n\n STACK USAGE: [stack count for this module] + [variable to represent\n          stack usage for each subroutine called]\n\n     where: [stack usage variable] = stack usage for [subroutine\n         name] (see [filename].ext)\n\n DATA MEMORY USED: x words\n\n PROGRAM MEMORY USED: x words\n\n CLOCK CYCLES: [cycle count equation for this module] + [variable\n           used to represent cycle count for each subroutine\n           called]\n\n     where: [cycle count variable] = cycle count for [subroutine\n        name] (see 
[filename].ext)\n\n------------------------------------------------------------------------------\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n//#define FILTER_LEN_8\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n\n----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef PV_POSTPROC_ON\n\n/*************************************************************************\n  
  Function prototype : void CombinedHorzVertFilter(   uint8 *rec,\n                                                        int width,\n                                                        int height,\n                                                        int16 *QP_store,\n                                                        int chr,\n                                                        uint8 *pp_mod)\n    Parameters  :\n        rec     :   pointer to the decoded frame buffer.\n        width   :   width of decoded frame.\n        height  :   height of decoded frame\n        QP_store:   pointer to the array of QP corresponding to the decoded frame.\n                    It has only one value for each MB.\n        chr     :   luma or color indication\n                    == 0 luma\n                    == 1 color\n        pp_mod  :   The semaphore used for deblocking\n\n    Remark      :   The function does the deblocking on decoded frames.\n                    First based on the semaphore info., it is divided into hard and soft filtering.\n                    To differentiate between real and fake edges, it then checks the difference with QP to\n                    decide whether to do the filtering or not.\n\n*************************************************************************/\n\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nvoid CombinedHorzVertFilter(\n    uint8 *rec,\n    int width,\n    int height,\n    int16 *QP_store,\n    int chr,\n    uint8 *pp_mod)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int br, bc, mbr, mbc;\n    int QP = 1;\n    uint8 *ptr, *ptr_e;\n    int pp_w, pp_h;\n    int brwidth;\n\n    int jVal0, jVal1, jVal2;\n    
/*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    pp_w = (width >> 3);\n    pp_h = (height >> 3);\n\n    for (mbr = 0; mbr < pp_h; mbr += 2)         /* row of blocks */\n    {\n        brwidth = mbr * pp_w;               /* number of blocks above current block row */\n        for (mbc = 0; mbc < pp_w; mbc += 2)     /* col of blocks */\n        {\n            if (!chr)\n                QP = QP_store[(brwidth>>2) + (mbc>>1)]; /* QP is per MB based value */\n\n            /********* for each block **************/\n            /****************** Horiz. Filtering ********************/\n            for (br = mbr + 1; br < mbr + 3; br++)  /* 2x2 blocks */\n            {\n                brwidth += pp_w;                    /* number of blocks above & left current block row */\n                /* the profile on ARM920T shows separate these two boundary check is faster than combine them */\n                if (br < pp_h)                  /* boundary : don't do it on the lowest row block */\n                    for (bc = mbc; bc < mbc + 2; bc++)\n                    {\n                        /****** check boundary for deblocking ************/\n                        if (bc < pp_w)              /* boundary : don't do it on the most right col block */\n                        {\n                            ptr = rec + (brwidth << 6) + (bc << 3);\n                            jVal0 = brwidth + bc;\n                            if (chr)    QP = QP_store[jVal0];\n\n                            ptr_e = ptr + 8;        /* pointer to where the loop ends */\n\n                            if (((pp_mod[jVal0]&0x02)) && ((pp_mod[jVal0-pp_w]&0x02)))\n                            {\n                                /* Horiz Hard filter */\n                                do\n                                {\n                                    jVal0 
= *(ptr - width);     /* C */\n                                    jVal1 = *ptr;               /* D */\n                                    jVal2 = jVal1 - jVal0;\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))\n                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1)))) /* (D-C) compared with 2QP */\n                                    {\n                                        /* differentiate between real and fake edge */\n                                        jVal0 = ((jVal0 + jVal1) >> 1);     /* (D+C)/2 */\n                                        *(ptr - width) = (uint8)(jVal0);    /*  C */\n                                        *ptr = (uint8)(jVal0);          /*  D */\n\n                                        jVal0 = *(ptr - (width << 1));      /* B */\n                                        jVal1 = *(ptr + width);         /* E */\n                                        jVal2 = jVal1 - jVal0;      /* E-B */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal0 += ((jVal2 + 3) >> 2);\n                                            jVal1 -= ((jVal2 + 3) >> 2);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /*  store B */\n                                            *(ptr + width) = (uint8)jVal1;          /* store E */\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal0 -= ((3 - jVal2) >> 2);\n                                            jVal1 += ((3 - jVal2) >> 2);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /*  store B */\n                                            *(ptr + width) = (uint8)jVal1;          /* store E */\n                                        }\n\n               
                         jVal0 = *(ptr - (width << 1) - width);  /* A */\n                                        jVal1 = *(ptr + (width << 1));      /* F */\n                                        jVal2 = jVal1 - jVal0;              /* (F-A) */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal0 += ((jVal2 + 7) >> 3);\n                                            jVal1 -= ((jVal2 + 7) >> 3);\n                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);\n                                            *(ptr + (width << 1)) = (uint8)(jVal1);\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal0 -= ((7 - jVal2) >> 3);\n                                            jVal1 += ((7 - jVal2) >> 3);\n                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);\n                                            *(ptr + (width << 1)) = (uint8)(jVal1);\n                                        }\n                                    }/* a3_0 > 2QP */\n                                }\n                                while (++ptr < ptr_e);\n                            }\n                            else   /* Horiz soft filter*/\n                            {\n                                do\n                                {\n                                    jVal0 = *(ptr - width); /* B */\n                                    jVal1 = *ptr;           /* C */\n                                    jVal2 = jVal1 - jVal0;  /* C-B */\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP)))\n                                            || ((jVal2 < 0) && (jVal2 > -(QP)))) /* (C-B) compared with QP */\n                                    {\n\n                                       
 jVal0 = ((jVal0 + jVal1) >> 1);     /* (B+C)/2 cannot overflow; ceil() */\n                                        *(ptr - width) = (uint8)(jVal0);    /* B = (B+C)/2 */\n                                        *ptr = (uint8)jVal0;            /* C = (B+C)/2 */\n\n                                        jVal0 = *(ptr - (width << 1));      /* A */\n                                        jVal1 = *(ptr + width);         /* D */\n                                        jVal2 = jVal1 - jVal0;          /* D-A */\n\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= ((jVal2 + 7) >> 3);\n                                            jVal0 += ((jVal2 + 7) >> 3);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /* A */\n                                            *(ptr + width) = (uint8)jVal1;          /* D */\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 += ((7 - jVal2) >> 3);\n                                            jVal0 -= ((7 - jVal2) >> 3);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /* A */\n                                            *(ptr + width) = (uint8)jVal1;          /* D */\n                                        }\n                                    }\n                                }\n                                while (++ptr < ptr_e);\n                            } /* Soft filter*/\n                        }/* boundary checking*/\n                    }/*bc*/\n            }/*br*/\n            brwidth -= (pp_w << 1);\n            /****************** Vert. 
Filtering ********************/\n            for (br = mbr; br < mbr + 2; br++)\n            {\n                if (br < pp_h)\n                    for (bc = mbc + 1; bc < mbc + 3; bc++)\n                    {\n                        /****** check boundary for deblocking ************/\n                        if (bc < pp_w)\n                        {\n                            ptr = rec + (brwidth << 6) + (bc << 3);\n                            jVal0 = brwidth + bc;\n                            if (chr)    QP = QP_store[jVal0];\n\n                            ptr_e = ptr + (width << 3);\n\n                            if (((pp_mod[jVal0-1]&0x01)) && ((pp_mod[jVal0]&0x01)))\n                            {\n                                /* Vert Hard filter */\n                                do\n                                {\n                                    jVal1 = *ptr;       /* D */\n                                    jVal0 = *(ptr - 1); /* C */\n                                    jVal2 = jVal1 - jVal0;  /* D-C */\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))\n                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1))))\n                                    {\n                                        jVal1 = (jVal0 + jVal1) >> 1;   /* (C+D)/2 */\n                                        *ptr        =   jVal1;\n                                        *(ptr - 1)  =   jVal1;\n\n                                        jVal1 = *(ptr + 1);     /* E */\n                                        jVal0 = *(ptr - 2);     /* B */\n                                        jVal2 = jVal1 - jVal0;      /* E-B */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= ((jVal2 + 3) >> 2);        /* E = E -(E-B)/4 */\n                                            jVal0 += ((jVal2 + 3) >> 2);        /* B = B +(E-B)/4 */\n    
                                        *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 += ((3 - jVal2) >> 2);        /* E = E -(E-B)/4 */\n                                            jVal0 -= ((3 - jVal2) >> 2);        /* B = B +(E-B)/4 */\n                                            *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n                                        }\n\n                                        jVal1 = *(ptr + 2);     /* F */\n                                        jVal0 = *(ptr - 3);     /* A */\n\n                                        jVal2 = jVal1 - jVal0;          /* (F-A) */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= ((jVal2 + 7) >> 3);    /* F -= (F-A)/8 */\n                                            jVal0 += ((jVal2 + 7) >> 3);    /* A += (F-A)/8 */\n                                            *(ptr + 2) = jVal1;\n                                            *(ptr - 3) = jVal0;\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 -= ((jVal2 - 7) >> 3);    /* F -= (F-A)/8 */\n                                            jVal0 += ((jVal2 - 7) >> 3);    /* A += (F-A)/8 */\n                                            *(ptr + 2) = jVal1;\n                                            *(ptr - 3) = jVal0;\n                                        }\n                                    }   /* end of ver hard filetering */\n                                }\n                                while ((ptr += width) < ptr_e);\n              
              }\n                            else   /* Vert soft filter*/\n                            {\n                                do\n                                {\n                                    jVal1 = *ptr;               /* C */\n                                    jVal0 = *(ptr - 1);         /* B */\n                                    jVal2 = jVal1 - jVal0;\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP)))\n                                            || ((jVal2 < 0) && (jVal2 > -(QP))))\n                                    {\n\n                                        jVal1 = (jVal0 + jVal1 + 1) >> 1;\n                                        *ptr = jVal1;           /* C */\n                                        *(ptr - 1) = jVal1;     /* B */\n\n                                        jVal1 = *(ptr + 1);     /* D */\n                                        jVal0 = *(ptr - 2);     /* A */\n                                        jVal2 = (jVal1 - jVal0);        /* D- A */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= (((jVal2) + 7) >> 3);      /* D -= (D-A)/8 */\n                                            jVal0 += (((jVal2) + 7) >> 3);      /* A += (D-A)/8 */\n                                            *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 += ((7 - (jVal2)) >> 3);      /* D -= (D-A)/8 */\n                                            jVal0 -= ((7 - (jVal2)) >> 3);      /* A += (D-A)/8 */\n                                            *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n                                        }\n                 
                   }\n                                }\n                                while ((ptr += width) < ptr_e);\n                            } /* Soft filter*/\n                        } /* boundary*/\n                    } /*bc*/\n                brwidth += pp_w;\n            }/*br*/\n            brwidth -= (pp_w << 1);\n        }/*mbc*/\n        brwidth += (pp_w << 1);\n    }/*mbr*/\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\nvoid CombinedHorzVertFilter_NoSoftDeblocking(\n    uint8 *rec,\n    int width,\n    int height,\n    int16 *QP_store,\n    int chr,\n    uint8 *pp_mod)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int br, bc, mbr, mbc;\n    int QP = 1;\n    uint8 *ptr, *ptr_e;\n    int pp_w, pp_h;\n    int brwidth;\n\n    int jVal0, jVal1, jVal2;\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    pp_w = (width >> 3);\n    pp_h = (height >> 3);\n\n    for (mbr = 0; mbr < pp_h; mbr += 2)         /* row of blocks */\n    {\n        brwidth = mbr * pp_w;               /* number of blocks above current block row */\n        for (mbc = 0; mbc < pp_w; mbc += 2)     /* col of blocks */\n        {\n            if (!chr)\n                QP = QP_store[(brwidth>>2) + (mbc>>1)]; /* QP is per MB based value */\n\n            /********* for each block **************/\n            /****************** Horiz. 
Filtering ********************/\n            for (br = mbr + 1; br < mbr + 3; br++)  /* 2x2 blocks */\n            {\n                brwidth += pp_w;                    /* number of blocks above & left current block row */\n                /* the profile on ARM920T shows separate these two boundary check is faster than combine them */\n                if (br < pp_h)                  /* boundary : don't do it on the lowest row block */\n                    for (bc = mbc; bc < mbc + 2; bc++)\n                    {\n                        /****** check boundary for deblocking ************/\n                        if (bc < pp_w)              /* boundary : don't do it on the most right col block */\n                        {\n                            ptr = rec + (brwidth << 6) + (bc << 3);\n                            jVal0 = brwidth + bc;\n                            if (chr)    QP = QP_store[jVal0];\n\n                            ptr_e = ptr + 8;        /* pointer to where the loop ends */\n\n                            if (((pp_mod[jVal0]&0x02)) && ((pp_mod[jVal0-pp_w]&0x02)))\n                            {\n                                /* Horiz Hard filter */\n                                do\n                                {\n                                    jVal0 = *(ptr - width);     /* C */\n                                    jVal1 = *ptr;               /* D */\n                                    jVal2 = jVal1 - jVal0;\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))\n                                            || ((jVal2 < 0) && (jVal2 > -(QP << 1)))) /* (D-C) compared with 2QP */\n                                    {\n                                        /* differentiate between real and fake edge */\n                                        jVal0 = ((jVal0 + jVal1) >> 1);     /* (D+C)/2 */\n                                        *(ptr - width) = (uint8)(jVal0);    /*  C */\n                                   
     *ptr = (uint8)(jVal0);          /*  D */\n\n                                        jVal0 = *(ptr - (width << 1));      /* B */\n                                        jVal1 = *(ptr + width);         /* E */\n                                        jVal2 = jVal1 - jVal0;      /* E-B */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal0 += ((jVal2 + 3) >> 2);\n                                            jVal1 -= ((jVal2 + 3) >> 2);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /*  store B */\n                                            *(ptr + width) = (uint8)jVal1;          /* store E */\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal0 -= ((3 - jVal2) >> 2);\n                                            jVal1 += ((3 - jVal2) >> 2);\n                                            *(ptr - (width << 1)) = (uint8)jVal0;       /*  store B */\n                                            *(ptr + width) = (uint8)jVal1;          /* store E */\n                                        }\n\n                                        jVal0 = *(ptr - (width << 1) - width);  /* A */\n                                        jVal1 = *(ptr + (width << 1));      /* F */\n                                        jVal2 = jVal1 - jVal0;              /* (F-A) */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal0 += ((jVal2 + 7) >> 3);\n                                            jVal1 -= ((jVal2 + 7) >> 3);\n                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);\n                                            *(ptr + (width << 1)) = (uint8)(jVal1);\n                         
               }\n                                        else if (jVal2)\n                                        {\n                                            jVal0 -= ((7 - jVal2) >> 3);\n                                            jVal1 += ((7 - jVal2) >> 3);\n                                            *(ptr - (width << 1) - width) = (uint8)(jVal0);\n                                            *(ptr + (width << 1)) = (uint8)(jVal1);\n                                        }\n                                    }/* a3_0 > 2QP */\n                                }\n                                while (++ptr < ptr_e);\n                            }\n\n                        }/* boundary checking*/\n                    }/*bc*/\n            }/*br*/\n            brwidth -= (pp_w << 1);\n            /****************** Vert. Filtering ********************/\n            for (br = mbr; br < mbr + 2; br++)\n            {\n                if (br < pp_h)\n                    for (bc = mbc + 1; bc < mbc + 3; bc++)\n                    {\n                        /****** check boundary for deblocking ************/\n                        if (bc < pp_w)\n                        {\n                            ptr = rec + (brwidth << 6) + (bc << 3);\n                            jVal0 = brwidth + bc;\n                            if (chr)    QP = QP_store[jVal0];\n\n                            ptr_e = ptr + (width << 3);\n\n                            if (((pp_mod[jVal0-1]&0x01)) && ((pp_mod[jVal0]&0x01)))\n                            {\n                                /* Vert Hard filter */\n                                do\n                                {\n                                    jVal1 = *ptr;       /* D */\n                                    jVal0 = *(ptr - 1); /* C */\n                                    jVal2 = jVal1 - jVal0;  /* D-C */\n\n                                    if (((jVal2 > 0) && (jVal2 < (QP << 1)))\n                                     
       || ((jVal2 < 0) && (jVal2 > -(QP << 1))))\n                                    {\n                                        jVal1 = (jVal0 + jVal1) >> 1;   /* (C+D)/2 */\n                                        *ptr        =   jVal1;\n                                        *(ptr - 1)  =   jVal1;\n\n                                        jVal1 = *(ptr + 1);     /* E */\n                                        jVal0 = *(ptr - 2);     /* B */\n                                        jVal2 = jVal1 - jVal0;      /* E-B */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= ((jVal2 + 3) >> 2);        /* E = E -(E-B)/4 */\n                                            jVal0 += ((jVal2 + 3) >> 2);        /* B = B +(E-B)/4 */\n                                            *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 += ((3 - jVal2) >> 2);        /* E = E -(E-B)/4 */\n                                            jVal0 -= ((3 - jVal2) >> 2);        /* B = B +(E-B)/4 */\n                                            *(ptr + 1) = jVal1;\n                                            *(ptr - 2) = jVal0;\n                                        }\n\n                                        jVal1 = *(ptr + 2);     /* F */\n                                        jVal0 = *(ptr - 3);     /* A */\n\n                                        jVal2 = jVal1 - jVal0;          /* (F-A) */\n\n                                        if (jVal2 > 0)\n                                        {\n                                            jVal1 -= ((jVal2 + 7) >> 3);    /* F -= (F-A)/8 */\n                                            jVal0 += ((jVal2 + 7) >> 3);    /* A 
+= (F-A)/8 */\n                                            *(ptr + 2) = jVal1;\n                                            *(ptr - 3) = jVal0;\n                                        }\n                                        else if (jVal2)\n                                        {\n                                            jVal1 -= ((jVal2 - 7) >> 3);    /* F -= (F-A)/8 */\n                                            jVal0 += ((jVal2 - 7) >> 3);    /* A += (F-A)/8 */\n                                            *(ptr + 2) = jVal1;\n                                            *(ptr - 3) = jVal0;\n                                        }\n                                    }   /* end of ver hard filetering */\n                                }\n                                while ((ptr += width) < ptr_e);\n                            }\n\n                        } /* boundary*/\n                    } /*bc*/\n                brwidth += pp_w;\n            }/*br*/\n            brwidth -= (pp_w << 1);\n        }/*mbc*/\n        brwidth += (pp_w << 1);\n    }/*mbr*/\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/chvr_filter.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n\n#ifdef PV_POSTPROC_ON\n\nvoid CombinedHorzVertRingFilter(\n    uint8 *rec,\n    int width,\n    int height,\n    int16 *QP_store,\n    int chr,\n    uint8 *pp_mod)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int index, counter;\n    int br, bc, incr, mbr, mbc;\n    int QP = 1;\n    int v[5];\n    uint8 *ptr, *ptr_c, *ptr_n;\n    int w1, w2, w3, w4;\n    int pp_w, pp_h, brwidth;\n    int sum, delta;\n    int a3_0, a3_1, a3_2, A3_0;\n    /* for Deringing Threshold approach (MPEG4)*/\n    int max_diff, thres, v0, h0, min_blk, max_blk;\n    int cnthflag;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* Calculate the width and height of the area in blocks (divide by 8) */\n    pp_w = (width >> 3);\n    pp_h = (height >> 3);\n\n    /* Set up various values needed for updating pointers into rec */\n    w1 = width;             
/* Offset to next row in pixels */\n    w2 = width << 1;        /* Offset to two rows in pixels */\n    w3 = w1 + w2;           /* Offset to three rows in pixels */\n    w4 = w2 << 1;           /* Offset to four rows in pixels */\n    incr = width - BLKSIZE; /* Offset to next row after processing block */\n\n    /* Work through the area hortizontally by two rows per step */\n    for (mbr = 0; mbr < pp_h; mbr += 2)\n    {\n        /* brwidth contains the block number of the leftmost block\n         * of the current row */\n        brwidth = mbr * pp_w;\n\n        /* Work through the area vertically by two columns per step */\n        for (mbc = 0; mbc < pp_w; mbc += 2)\n        {\n            /* if the data is luminance info, get the correct\n                    * quantization paramenter. One parameter per macroblock */\n            if (!chr)\n            {\n                /* brwidth/4 is the macroblock number and mbc/2 is the macroblock col number*/\n                QP = QP_store[(brwidth>>2) + (mbc>>1)];\n            }\n\n            /****************** Horiz. 
Filtering ********************/\n            /* Process four blocks for the filtering        */\n            /********************************************************/\n            /* Loop over two rows of blocks */\n            for (br = mbr + 1; br < mbr + 3; br++)    /* br is the row counter in blocks */\n            {\n                /* Set brwidth to the first (leftmost) block number of the next row */\n                /* brwidth is used as an index when counting blocks */\n                brwidth += pp_w;\n\n                /* Loop over two columns of blocks in the row */\n                for (bc = mbc; bc < mbc + 2; bc++)    /* bc is the column counter in blocks */\n                {\n                    /****** check boundary for deblocking ************/\n                    /* Execute if the row and column counters are within the area */\n                    if (br < pp_h && bc < pp_w)\n                    {\n                        /* Set the ptr to the first pixel of the first block of the second row\n                        * brwidth * 64 is the pixel row offset\n                        * bc * 8 is the pixel column offset */\n                        ptr = rec + (brwidth << 6) + (bc << 3);\n\n                        /* Set the index to the current block of the second row counting in blocks */\n                        index = brwidth + bc;\n\n                        /* if the data is chrominance info, get the correct\n                         * quantization paramenter. One parameter per block. 
*/\n                        if (chr)\n                        {\n                            QP = QP_store[index];\n                        }\n\n                        /* Execute hard horizontal filter if semaphore for horizontal deblocking\n                          * is set for the current block and block immediately above it */\n                        if (((pp_mod[index]&0x02) != 0) && ((pp_mod[index-pp_w]&0x02) != 0))\n                        {   /* Hard filter */\n\n                            /* Set HorzHflag (bit 4) in the pp_mod location */\n                            pp_mod[index-pp_w] |= 0x10; /*  4/26/00 reuse pp_mod for HorzHflag*/\n\n                            /* Filter across the 8 pixels of the block */\n                            for (index = BLKSIZE; index > 0; index--)\n                            {\n                                /* Difference between the current pixel and the pixel above it */\n                                a3_0 = *ptr - *(ptr - w1);\n\n                                /* if the magnitude of the difference is greater than the KThH threshold\n                                 * and within the quantization parameter, apply hard filter */\n                                if ((a3_0 > KThH || a3_0 < -KThH) && a3_0<QP && a3_0> -QP)\n                                {\n                                    ptr_c = ptr - w3;   /* Points to pixel three rows above */\n                                    ptr_n = ptr + w1;   /* Points to pixel one row below */\n                                    v[0] = (int)(*(ptr_c - w3));\n                                    v[1] = (int)(*(ptr_c - w2));\n                                    v[2] = (int)(*(ptr_c - w1));\n                                    v[3] = (int)(*ptr_c);\n                                    v[4] = (int)(*(ptr_c + w1));\n\n                                    sum = v[0]\n                                          + v[1]\n                                          + v[2]\n             
                             + *ptr_c\n                                          + v[4]\n                                          + (*(ptr_c + w2))\n                                          + (*(ptr_c + w3));  /* Current pixel */\n\n                                    delta = (sum + *ptr_c + 4) >> 3;   /* Average pixel values with rounding */\n                                    *(ptr_c) = (uint8) delta;\n\n                                    /* Move pointer down one row of pixels (points to pixel two rows\n                                     * above current pixel) */\n                                    ptr_c += w1;\n\n                                    for (counter = 0; counter < 5; counter++)\n                                    {\n                                        /* Subtract off highest pixel and add in pixel below */\n                                        sum = sum - v[counter] + *ptr_n;\n                                        /* Average the pixel values with rounding */\n                                        delta = (sum + *ptr_c + 4) >> 3;\n                                        *ptr_c = (uint8)(delta);\n\n                                        /* Increment pointers to next pixel row */\n                                        ptr_c += w1;\n                                        ptr_n += w1;\n                                    }\n                                }\n                                /* Increment pointer to next pixel */\n                                ++ptr;\n                            } /* index*/\n                        }\n                        else\n                        { /* soft filter*/\n\n                            /* Clear HorzHflag (bit 4) in the pp_mod location */\n                            pp_mod[index-pp_w] &= 0xef; /* reset 1110,1111 */\n\n                            for (index = BLKSIZE; index > 0; index--)\n                            {\n                                /* Difference between the 
current pixel and the pixel above it */\n                                a3_0 = *(ptr) - *(ptr - w1);\n\n                                /* if the magnitude of the difference is greater than the KTh threshold,\n                                 * apply soft filter */\n                                if ((a3_0 > KTh || a3_0 < -KTh))\n                                {\n\n                                    /* Sum of weighted differences */\n                                    a3_0 += ((*(ptr - w2) - *(ptr + w1)) << 1) + (a3_0 << 2);\n\n                                    /* Check if sum is less than the quantization parameter */\n                                    if (PV_ABS(a3_0) < (QP << 3))\n                                    {\n                                        a3_1 = *(ptr - w2) - *(ptr - w3);\n                                        a3_1 += ((*(ptr - w4) - *(ptr - w1)) << 1) + (a3_1 << 2);\n\n                                        a3_2  = *(ptr + w2) - *(ptr + w1);\n                                        a3_2 += ((*(ptr) - *(ptr + w3)) << 1) + (a3_2 << 2);\n\n                                        A3_0 = PV_ABS(a3_0) - PV_MIN(PV_ABS(a3_1), PV_ABS(a3_2));\n\n                                        if (A3_0 > 0)\n                                        {\n                                            A3_0 += A3_0 << 2;\n                                            A3_0 = (A3_0 + 32) >> 6;\n                                            if (a3_0 > 0)\n                                            {\n                                                A3_0 = -A3_0;\n                                            }\n\n                                            delta = (*(ptr - w1) - *(ptr)) >> 1;\n                                            if (delta >= 0)\n                                            {\n                                                if (delta >= A3_0)\n                                                {\n                                                   
 delta = PV_MAX(A3_0, 0);\n                                                }\n                                            }\n                                            else\n                                            {\n                                                if (A3_0 > 0)\n                                                {\n                                                    delta = 0;\n                                                }\n                                                else\n                                                {\n                                                    delta = PV_MAX(A3_0, delta);\n                                                }\n                                            }\n\n                                            *(ptr - w1) = (uint8)(*(ptr - w1) - delta);\n                                            *(ptr) = (uint8)(*(ptr) + delta);\n                                        }\n                                    } /*threshold*/\n                                }\n                                /* Increment pointer to next pixel */\n                                ++ptr;\n                            } /*index*/\n                        } /* Soft filter*/\n                    }/* boundary checking*/\n                }/*bc*/\n            }/*br*/\n            brwidth -= (pp_w << 1);\n\n\n            /****************** Vert. 
Filtering *********************/\n            /* Process four blocks for the filtering        */\n            /********************************************************/\n            /* Loop over two rows of blocks */\n            for (br = mbr; br < mbr + 2; br++)      /* br is the row counter in blocks */\n            {\n                for (bc = mbc + 1; bc < mbc + 3; bc++)  /* bc is the column counter in blocks */\n                {\n                    /****** check boundary for deblocking ************/\n                    /* Execute if the row and column counters are within the area */\n                    if (br < pp_h && bc < pp_w)\n                    {\n                        /* Set the ptr to the first pixel of the first block of the second row\n                        * brwidth * 64 is the pixel row offset\n                        * bc * 8 is the pixel column offset */\n                        ptr = rec + (brwidth << 6) + (bc << 3);\n\n                        /* Set the index to the current block of the second row counting in blocks */\n                        index = brwidth + bc;\n\n                        /* if the data is chrominance info, get the correct\n                         * quantization paramenter. One parameter per block. 
*/\n                        if (chr)\n                        {\n                            QP = QP_store[index];\n                        }\n\n                        /* Execute hard vertical filter if semaphore for vertical deblocking\n                          * is set for the current block and block immediately left of it */\n                        if (((pp_mod[index-1]&0x01) != 0) && ((pp_mod[index]&0x01) != 0))\n                        {   /* Hard filter */\n\n                            /* Set VertHflag (bit 5) in the pp_mod location of previous block*/\n                            pp_mod[index-1] |= 0x20; /*  4/26/00 reuse pp_mod for VertHflag*/\n\n                            /* Filter across the 8 pixels of the block */\n                            for (index = BLKSIZE; index > 0; index--)\n                            {\n                                /* Difference between the current pixel\n                                * and the pixel to left of it */\n                                a3_0 = *ptr - *(ptr - 1);\n\n                                /* if the magnitude of the difference is greater than the KThH threshold\n                                 * and within the quantization parameter, apply hard filter */\n                                if ((a3_0 > KThH || a3_0 < -KThH) && a3_0<QP && a3_0> -QP)\n                                {\n                                    ptr_c = ptr - 3;\n                                    ptr_n = ptr + 1;\n                                    v[0] = (int)(*(ptr_c - 3));\n                                    v[1] = (int)(*(ptr_c - 2));\n                                    v[2] = (int)(*(ptr_c - 1));\n                                    v[3] = (int)(*ptr_c);\n                                    v[4] = (int)(*(ptr_c + 1));\n\n                                    sum = v[0]\n                                          + v[1]\n                                          + v[2]\n                                          + 
*ptr_c\n                                          + v[4]\n                                          + (*(ptr_c + 2))\n                                          + (*(ptr_c + 3));\n\n                                    delta = (sum + *ptr_c + 4) >> 3;\n                                    *(ptr_c) = (uint8) delta;\n\n                                    /* Move pointer down one pixel to the right */\n                                    ptr_c += 1;\n                                    for (counter = 0; counter < 5; counter++)\n                                    {\n                                        /* Subtract off highest pixel and add in pixel below */\n                                        sum = sum - v[counter] + *ptr_n;\n                                        /* Average the pixel values with rounding */\n                                        delta = (sum + *ptr_c + 4) >> 3;\n                                        *ptr_c = (uint8)(delta);\n\n                                        /* Increment pointers to next pixel */\n                                        ptr_c += 1;\n                                        ptr_n += 1;\n                                    }\n                                }\n                                /* Increment pointers to next pixel row */\n                                ptr += w1;\n                            } /* index*/\n                        }\n                        else\n                        { /* soft filter*/\n\n                            /* Clear VertHflag (bit 5) in the pp_mod location */\n                            pp_mod[index-1] &= 0xdf; /* reset 1101,1111 */\n                            for (index = BLKSIZE; index > 0; index--)\n                            {\n                                /* Difference between the current pixel and the pixel above it */\n                                a3_0 = *(ptr) - *(ptr - 1);\n\n                                /* if the magnitude of the difference is greater than 
the KTh threshold,\n                                 * apply soft filter */\n                                if ((a3_0 > KTh || a3_0 < -KTh))\n                                {\n\n                                    /* Sum of weighted differences */\n                                    a3_0 += ((*(ptr - 2) - *(ptr + 1)) << 1) + (a3_0 << 2);\n\n                                    /* Check if sum is less than the quantization parameter */\n                                    if (PV_ABS(a3_0) < (QP << 3))\n                                    {\n                                        a3_1 = *(ptr - 2) - *(ptr - 3);\n                                        a3_1 += ((*(ptr - 4) - *(ptr - 1)) << 1) + (a3_1 << 2);\n\n                                        a3_2  = *(ptr + 2) - *(ptr + 1);\n                                        a3_2 += ((*(ptr) - *(ptr + 3)) << 1) + (a3_2 << 2);\n\n                                        A3_0 = PV_ABS(a3_0) - PV_MIN(PV_ABS(a3_1), PV_ABS(a3_2));\n\n                                        if (A3_0 > 0)\n                                        {\n                                            A3_0 += A3_0 << 2;\n                                            A3_0 = (A3_0 + 32) >> 6;\n                                            if (a3_0 > 0)\n                                            {\n                                                A3_0 = -A3_0;\n                                            }\n\n                                            delta = (*(ptr - 1) - *(ptr)) >> 1;\n                                            if (delta >= 0)\n                                            {\n                                                if (delta >= A3_0)\n                                                {\n                                                    delta = PV_MAX(A3_0, 0);\n                                                }\n                                            }\n                                            else\n                          
                  {\n                                                if (A3_0 > 0)\n                                                {\n                                                    delta = 0;\n                                                }\n                                                else\n                                                {\n                                                    delta = PV_MAX(A3_0, delta);\n                                                }\n                                            }\n\n                                            *(ptr - 1) = (uint8)(*(ptr - 1) - delta);\n                                            *(ptr) = (uint8)(*(ptr) + delta);\n                                        }\n                                    } /*threshold*/\n                                }\n                                ptr += w1;\n                            } /*index*/\n                        } /* Soft filter*/\n                    } /* boundary*/\n                } /*bc*/\n                /* Increment pointer to next row of pixels */\n                brwidth += pp_w;\n            }/*br*/\n            brwidth -= (pp_w << 1);\n\n            /****************** Deringing ***************************/\n            /* Process four blocks for the filtering        */\n            /********************************************************/\n            /* Loop over two rows of blocks */\n            for (br = mbr; br < mbr + 2; br++)\n            {\n                /* Loop over two columns of blocks in the row */\n                for (bc = mbc; bc < mbc + 2; bc++)\n                {\n                    /* Execute if the row and column counters are within the area */\n                    if (br < pp_h && bc < pp_w)\n                    {\n                        /* Set the index to the current block */\n                        index = brwidth + bc;\n\n                        /* Execute deringing if semaphore for deringing (bit-3 of 
pp_mod)\n                         * is set for the current block */\n                        if ((pp_mod[index]&0x04) != 0)\n                        {\n                            /* Don't process deringing if on an edge block */\n                            if (br > 0 && bc > 0 && br < pp_h - 1 && bc < pp_w - 1)\n                            {\n                                /* cnthflag = weighted average of HorzHflag of current,\n                                 * one above, previous blocks*/\n                                cnthflag = ((pp_mod[index] & 0x10) +\n                                            (pp_mod[index-pp_w] & 0x10) +\n                                            ((pp_mod[index-1] >> 1) & 0x10) +\n                                            ((pp_mod[index] >> 1) & 0x10)) >> 4; /* 4/26/00*/\n\n                                /* Do the deringing if decision flags indicate it's necessary */\n                                if (cnthflag < 3)\n                                {\n                                    /* if the data is chrominance info, get the correct\n                                     * quantization paramenter. One parameter per block. 
*/\n                                    if (chr)\n                                    {\n                                        QP = QP_store[index];\n                                    }\n\n                                    /* Set amount to change luminance if it needs to be changed\n                                     * based on quantization parameter */\n                                    max_diff = (QP >> 2) + 4;\n\n                                    /* Set pointer to first pixel of current block */\n                                    ptr = rec + (brwidth << 6) + (bc << 3);\n\n                                    /* Find minimum and maximum value of pixel block */\n                                    FindMaxMin(ptr, &min_blk, &max_blk, incr);\n\n                                    /* threshold determination */\n                                    thres = (max_blk + min_blk + 1) >> 1;\n\n                                    /* If pixel range is greater or equal than DERING_THR, smooth the region */\n                                    if ((max_blk - min_blk) >= DERING_THR) /*smooth 8x8 region*/\n#ifndef NoMMX\n                                    {\n                                        /* smooth all pixels in the block*/\n                                        DeringAdaptiveSmoothMMX(ptr, width, thres, max_diff);\n                                    }\n#else\n                                    {\n                                        /* Setup the starting point of the region to smooth */\n                                        v0 = (br << 3) - 1;\n                                        h0 = (bc << 3) - 1;\n\n                                        /*smooth 8x8 region*/\n                                        AdaptiveSmooth_NoMMX(rec, v0, h0, v0 + 1, h0 + 1, thres, width, max_diff);\n                                    }\n#endif\n                                }/*cnthflag*/\n                            } /*dering br==1 or bc==1 (boundary 
block)*/\n                            else    /* Process the boundary blocks */\n                            {\n                                /* Decide to perform deblocking based on the semaphore flags\n                                   * of the neighboring blocks in each case. A certain number of\n                                 * hard filtering flags have to be set in order to signal need\n                                 * for smoothing */\n                                if (br > 0 && br < pp_h - 1)\n                                {\n                                    if (bc > 0)\n                                    {\n                                        cnthflag = ((pp_mod[index-pp_w] & 0x10) +\n                                                    (pp_mod[index] & 0x10) +\n                                                    ((pp_mod[index-1] >> 1) & 0x10)) >> 4;\n                                    }\n                                    else\n                                    {\n                                        cnthflag = ((pp_mod[index] & 0x10) +\n                                                    (pp_mod[index-pp_w] & 0x10) +\n                                                    ((pp_mod[index] >> 1) & 0x10)) >> 4;\n                                    }\n                                }\n                                else if (bc > 0 && bc < pp_w - 1)\n                                {\n                                    if (br > 0)\n                                    {\n                                        cnthflag = ((pp_mod[index-pp_w] & 0x10) +\n                                                    ((pp_mod[index-1] >> 1) & 0x10) +\n                                                    ((pp_mod[index] >> 1) & 0x10)) >> 4;\n                                    }\n                                    else\n                                    {\n                                        cnthflag = ((pp_mod[index] & 0x10) +\n                
                                    ((pp_mod[index-1] >> 1) & 0x10) +\n                                                    ((pp_mod[index] >> 1) & 0x10)) >> 4;\n                                    }\n                                }\n                                else /* at the corner do default*/\n                                {\n                                    cnthflag = 0;\n                                }\n\n                                /* Do the deringing if decision flags indicate it's necessary */\n                                if (cnthflag < 2)\n                                {\n\n                                    /* if the data is chrominance info, get the correct\n                                                         * quantization paramenter. One parameter per block. */\n                                    if (chr)\n                                    {\n                                        QP = QP_store[index];\n                                    }\n\n                                    /* Set amount to change luminance if it needs to be changed\n                                     * based on quantization parameter */\n                                    max_diff = (QP >> 2) + 4;\n\n                                    /* Set pointer to first pixel of current block */\n                                    ptr = rec + (brwidth << 6) + (bc << 3);\n\n                                    /* Find minimum and maximum value of pixel block */\n                                    FindMaxMin(ptr, &min_blk, &max_blk, incr);\n\n                                    /* threshold determination */\n                                    thres = (max_blk + min_blk + 1) >> 1;\n\n                                    /* Setup the starting point of the region to smooth\n                                     * This is going to be a 4x4 region */\n                                    v0 = (br << 3) + 1;\n                                    h0 = (bc << 3) + 
1;\n\n                                    /* If pixel range is greater or equal than DERING_THR, smooth the region */\n                                    if ((max_blk - min_blk) >= DERING_THR)\n                                    {\n                                        /* Smooth 4x4 region */\n                                        AdaptiveSmooth_NoMMX(rec, v0, h0, v0 - 3, h0 - 3, thres, width, max_diff);\n                                    }\n                                }/*cnthflag*/\n                            } /* br==0, bc==0*/\n                        }  /* dering*/\n                    } /*boundary condition*/\n                }/*bc*/\n                brwidth += pp_w;\n            }/*br*/\n            brwidth -= (pp_w << 1);\n        }/*mbc*/\n        brwidth += (pp_w << 1);\n    }/*mbr*/\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return ;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 2009 OrangeLabs\n *\n * Author: Alexis Gilabert Senar\n * Date: 2009-07-01\n * -------------------------------------------------------------------\n */\n#define LOG_TAG \"NativeDec\"\n#include <stdio.h>\n#include <stdlib.h>\n#include \"com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.h\"\n#include \"mp4dec_api.h\"\n#include \"3GPVideoParser.h\"\n#include \"yuv2rgb.h\"\n\n/*\n * Global variables\n *\n*/\n  VideoDecControls iDecoderControl;\n  uint8* pFrame0,*pFrame1;\n  int32 nLayers = 1;\n  uint8** volbuf;\n  int32 volbuf_size[]= {0};\n  int32 iHeight = 0;\n  int32 iWidth = 0;\n  uint32 FrameSize = 0;\n  uint32 VideoDecOutputSize = 0;\n  MP4DecodingMode mode= H263_MODE;\n  /* Parser */\n  uint8* aOutBuffer;\n  uint32 aOutBufferSize = 0;\n  uint32 aOutTimestamp = 0;\n  int parserInitialized = 0;\n  int decoderInitialized = 0;\n  bool Status = false;\n\n/**\n * De-Init decoder\n */\nint deinitDecoder(){\n\tif (PVCleanUpVideoDecoder(&iDecoderControl)== 1) {\n\t\tif (pFrame0) free(pFrame0);\n\t\tif (pFrame1) free(pFrame1);\n\t\tdecoderInitialized = 0;\n\t\treturn 1;\n\t} else {\n\t\tif (pFrame0) free(pFrame0);\n\t\tif (pFrame1) free(pFrame1);\n\t\tdecoderInitialized = 0;\n\t\treturn 0;\n\t}\n}\n\n/**\n * Init decoder\n * @param srcWidth video width\n * @param srcHeight video height\n */\nint initDecoder(int srcWidth, int srcHeight){\n\tif (decoderInitialized == 1) deinitDecoder();\n\tiWidth = srcWidth;\n\tiHeight = srcHeight;\n\tFrameSize = (srcWidth * srcHeight);\n\tVideoDecOutputSize = (FrameSize * 3) >> 1;\n\tvolbuf_size[0]= VideoDecOutputSize;\n\tvolbuf = (uint8**)malloc(nLayers * sizeof(uint8*));\n\tif (volbuf == NULL) return 0;\n\tvolbuf[0] = (uint8*)malloc(volbuf_size[0]);\n\tif (volbuf[0] == NULL) return 0;\n\tmemset(*volbuf,0,volbuf_size[0]);\n\tif (!PVInitVideoDecoder(&iDecoderControl,volbuf, volbuf_size, nLayers, iWidth, 
iHeight, mode)) return 0;\n\tPVSetPostProcType(&iDecoderControl,2);\n\tpFrame0 = (uint8*) malloc(VideoDecOutputSize);\n\tif (pFrame0 == NULL) return 0;\n\tpFrame1 = (uint8*) malloc(VideoDecOutputSize);\n\tif (pFrame1 == NULL) return 0;\n\tmemset(pFrame1,0,VideoDecOutputSize);\n\tPVSetReferenceYUV(&iDecoderControl, pFrame1);\n\tdecoderInitialized = 1;\n\treturn decoderInitialized;\n }\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    InitDecoder\n * Signature: (II)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitDecoder\n  (JNIEnv * env, jclass clazz, jint srcWidth, jint srcHeight){\n\treturn initDecoder(srcWidth,srcHeight);\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    DeinitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitDecoder\n  (JNIEnv * env, jclass clazz){\n\treturn deinitDecoder();\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    DecodeAndConvert\n * Signature: ([B[IJ)[I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DecodeAndConvert\n  (JNIEnv *env, jclass clazz, jbyteArray h263Frame, jintArray decoded, jlong jtimestamp){\n\n\t/* Return if decoder is not initialized */\n\tif (!decoderInitialized){\n\t\treturn 0;\n\t}\n\n\t/* Set volbuf with h263Frame data*/\n\tjint len = env->GetArrayLength(h263Frame);\n\tjbyte data[len];\n\tenv->GetByteArrayRegion(h263Frame, 0, len, data);\n\n\t/* Decode */\n\tuint32 timestamp[]={(uint32)(jtimestamp & 0xFFFFFFFF)};\n\tuint usetimestamp[]={0};\n\tvolbuf[0] = (uint8*)data;\n\tvolbuf_size[0]=len;\n\n\tif (PVDecodeVideoFrame(&iDecoderControl, 
volbuf,timestamp,volbuf_size,usetimestamp,pFrame0) == 0){\n\t\treturn 0;\n\t}\n\n\t/* Copy result to YUV  array ! */\n\tuint8* decodedFrame = iDecoderControl.outputFrame;\n\tuint8* pTempFrame;\n\tpTempFrame = (uint8*) pFrame0;\n\tpFrame0 = (uint8*) pFrame1;\n\tpFrame1 = (uint8*) pTempFrame;\n\n\t/* Create the output buffer */\n\tuint32* resultBuffer= (uint32*) malloc(iWidth*iHeight*sizeof(uint32));\n\tif (resultBuffer == NULL) return 0;\n\n\t/***********  Convert to rgb  ***************/\n\tif (convert(iWidth,iHeight,decodedFrame,resultBuffer) == 0){\n\t  return 0;\n\t}\n\n\t/* Return Bitmap image */\n\t(env)->SetIntArrayRegion(decoded, 0, iWidth*iHeight, (const jint*)resultBuffer);\n\tfree(resultBuffer);\n\treturn 1;\n\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    InitParser\n * Signature: (Ljava/lang/String;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitParser\n  (JNIEnv *env, jclass clazz, jstring pathToFile){\n\n  char *str;\n  str = (char*)env->GetStringUTFChars(pathToFile, NULL);\n  if (str == NULL) return 0;\n\n  /* Init parser */\n  if (parserInitialized == 1){\n\t  parserInitialized = 0;\n\t  release();\n  }\n  if(Init3GPVideoParser(str) == 1){\n\tenv->ReleaseStringUTFChars(pathToFile, str);\n\tiWidth = getVideoWidth();\n\tiHeight = getVideoHeight();\n\tFrameSize = iWidth * iHeight;\n\tVideoDecOutputSize = (FrameSize * 3)>>1;\n\taOutBuffer = (uint8*)malloc(VideoDecOutputSize);\n\tparserInitialized = 1;\n  } else {\n\tenv->ReleaseStringUTFChars(pathToFile, str);\n\treturn 0;\n  }\n\n  /* Init decoder */\n  if (decoderInitialized == 1){\n\t  deinitDecoder();\n  }\n  if(initDecoder(iWidth,iHeight)== 0) return 0;\n\n  return 1;\n\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    DeinitParser\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitParser\n  (JNIEnv *env, jclass clazz){\n\tif (decoderInitialized == 1) deinitDecoder();\n\tparserInitialized = 0;\n\treturn release();\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoLength\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoLength\n  (JNIEnv *env, jclass clazz){\n\tjint videoLength = getVideoDuration();\n  return videoLength;\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoWidth\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoWidth\n  (JNIEnv *env, jclass clazz)\n{\n    return getVideoWidth();\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoHeight\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoHeight\n  (JNIEnv *env, jclass clazz)\n{\n    return getVideoHeight();\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoCoding\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoCoding\n  (JNIEnv *env, jclass clazz)\n{\n  jstring stringVideoCoding;\n  char* charVideoCoding = getVideoCodec();\n  stringVideoCoding = (env)->NewStringUTF(charVideoCoding);\n  return stringVideoCoding;\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_Native3GPPFileParser\n * Method:    getVideoSample\n * Signature: 
([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoSample\n */\nJNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoSample\n  (JNIEnv *env, jclass clazz, jintArray Decoded)\n{\n\tjobject object = NULL;\n\n\t/* Error return */\n\tif (parserInitialized == 0){\n\t\treturn object;\n\t}\n\t// Get the new frame\n\tif (getFrame(aOutBuffer,&aOutBufferSize,&aOutTimestamp)!= VPAtomSucces){\n\t\treturn object;\n\t}\n\n\t/* Set frame with aOutBuffer data and timestamp*/\n\tjbyteArray H263Frame = (env)->NewByteArray(aOutBufferSize);\n\t(env)->SetByteArrayRegion(H263Frame, 0, aOutBufferSize, (const jbyte*)aOutBuffer);\n\n\t/* Decode */\n\tuint32 timestamp[]={aOutTimestamp};\n\tuint usetimestamp[]={0};\n\tvolbuf[0] = aOutBuffer;\n\tvolbuf_size[0]=aOutBufferSize;\n\n\tif (!PVDecodeVideoFrame(&iDecoderControl, volbuf,timestamp,volbuf_size,usetimestamp,pFrame0)){\n\t\treturn object;\n\t}\n\t/* Copy result to YUV  array ! */\n\tuint8* pTempFrame;\n\tuint8* decodedFrame = iDecoderControl.outputFrame;\n\tpTempFrame = (uint8*) pFrame0;\n\tpFrame0 = (uint8*) pFrame1;\n\tpFrame1 = (uint8*) pTempFrame;\n\n  \t/* Create the output buffer */\n\tuint32* resultBuffer = (uint32*)malloc(FrameSize*sizeof(uint32));\n  \t/* Convert YUV to RGB */\n\tconvert(iWidth,iHeight,decodedFrame,resultBuffer);\n  \t/* Set Decoded */\n\t(env)->SetIntArrayRegion(Decoded, 0, FrameSize, (const jint*)resultBuffer);\n  \tfree(resultBuffer);\n\t\n\t// Create new object\n\t/* Find class and method to return VideoSample*/\n\tjclass classe = (env)->FindClass(\"com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample\");\n\tif (classe == 0) {\n\t  return object;\n\t}\n\tjmethodID mid = (env)->GetMethodID(classe,\"<init>\",\"([BI)V\");\n\tif (mid == 0) {\n\t  return object;\n\t}\n\tobject = (env)->NewObject(classe,mid,H263Frame,aOutTimestamp);\n\tif (object == 0) {\n\t  return object;\n\t}\n\t// Return created 
object\n\treturn object;\n}\n\n/*\n * This is called by the VM when the shared library is first loaded.\n */\njint JNI_OnLoad(JavaVM* vm, void* reserved) {\n    JNIEnv* env = NULL;\n    jint result = -1;\n    if (vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {\n        goto bail;\n    }\n    /* success -- return valid version number */\n    result = JNI_VERSION_1_4;\nbail:\n    return result;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder.h",
    "content": "/* DO NOT EDIT THIS FILE - it is machine generated */\n#include <jni.h>\n/* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder */\n\n#ifndef _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n#define _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    InitDecoder\n * Signature: (II)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitDecoder\n  (JNIEnv *, jclass, jint, jint);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    DeinitDecoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitDecoder\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    DecodeAndConvert\n * Signature: ([B[IJ)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DecodeAndConvert\n  (JNIEnv *, jclass, jbyteArray, jintArray, jlong);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    InitParser\n * Signature: (Ljava/lang/String;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_InitParser\n  (JNIEnv *, jclass, jstring);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    DeinitParser\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL 
Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_DeinitParser\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    getVideoLength\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoLength\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    getVideoWidth\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoWidth\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    getVideoHeight\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoHeight\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    getVideoCoding\n * Signature: ()Ljava/lang/String;\n */\nJNIEXPORT jstring JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoCoding\n  (JNIEnv *, jclass);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder\n * Method:    getVideoSample\n * Signature: ([I)Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample;\n */\nJNIEXPORT jobject JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_decoder_NativeH263Decoder_getVideoSample\n  (JNIEnv *, jclass, jintArray);\n\n#ifdef __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/combined_decode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\" /* video decoder function prototypes */\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"scaling.h\"\n#include \"mbtype_mode.h\"\n\n#define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT\n#include \"osclconfig_compiler_warnings.h\"\n/* ======================================================================== */\n/*  Function : DecodeFrameCombinedMode()                                    */\n/*  Purpose  : Decode a frame of MPEG4 bitstream in combined mode.          */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*                                                                          */\n/*      03/30/2000 : Cleaned up and optimized the code.             */\n/*      03/31/2000 : Added proper handling of MB stuffing.          
*/\n/*      04/13/2000 : Rewrote this combined mode path completely     */\n/*                           so that it handles \"Combined Mode With Error   */\n/*                           Resilience.\"  Now the code resembles the       */\n/*                           pseudo codes in MPEG-4 standard better.        */\n/*      10/13/2000 : Add fast VLC+dequant                           */\n/*      04/13/2001 : fix MB_stuffing                               */\n/*      08/07/2001 : remove MBzero                                  */\n/* ======================================================================== */\nPV_STATUS DecodeFrameCombinedMode(VideoDecData *video)\n{\n    PV_STATUS status;\n    int mbnum;\n    Vop *currVop = video->currVop;\n    BitstreamDecVideo *stream = video->bitstream;\n    int shortVideoHeader = video->shortVideoHeader;\n    int16 QP, *QPMB = video->QPMB;\n    uint8 *Mode = video->headerInfo.Mode;\n    int nTotalMB = video->nTotalMB;\n    int nMBPerRow = video->nMBPerRow;\n    int slice_counter;\n    uint32 tmpvar, long_zero_bits;\n    uint code;\n    int valid_stuffing;\n    int resync_marker_length;\n    int stuffing_length;\n\n    /* add this for error resilient, 05/18/2000 */\n    int32 startPacket;\n    int mb_start;\n    /* copy and pad to prev_Vop for INTER coding */\n    switch (currVop->predictionType)\n    {\n        case I_VOP :\n//      oscl_memset(Mode, MODE_INTRA, sizeof(uint8)*nTotalMB);\n            resync_marker_length = 17;\n            stuffing_length = 9;\n            break;\n        case P_VOP :\n            oscl_memset(video->motX, 0, sizeof(MOT)*4*nTotalMB);\n            oscl_memset(video->motY, 0, sizeof(MOT)*4*nTotalMB);\n//      oscl_memset(Mode, MODE_INTER, sizeof(uint8)*nTotalMB);\n            resync_marker_length = 16 + currVop->fcodeForward;\n            stuffing_length = 10;\n            break;\n        default :\n            mp4dec_log(\"DecodeFrameCombinedMode(): Vop type not supported.\\n\");\n            return 
PV_FAIL;\n    }\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    if (video->shortVideoHeader & PV_H263)\n    {\n        if (video->advanced_INTRA)\n        {\n            if (video->modified_quant)\n            {\n                video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexIT;\n                video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader_AnnexT;\n            }\n            else\n            {\n                video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexI;\n                video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader;\n            }\n        }\n        else\n        {\n            if (video->modified_quant)\n            {\n                video->vlcDecCoeffInter = video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader_AnnexT;\n            }\n            else\n            {\n                video->vlcDecCoeffInter = video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader;\n            }\n        }\n    }\n\n#endif\n\n    /** Initialize sliceNo ***/\n    mbnum = slice_counter = 0;\n//  oscl_memset(video->sliceNo, 0, sizeof(uint8)*nTotalMB);\n    QP = video->currVop->quantizer;\n\n    do\n    {\n        /* This section is equivalent to motion_shape_texture() */\n        /*    in the MPEG-4 standard.     
04/13/2000          */\n        mb_start = mbnum;\n        video->usePrevQP = 0;             /*  04/27/01 */\n        startPacket = getPointer(stream);\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (video->modified_quant)\n        {\n            video->QP_CHR = MQ_chroma_QP_table[QP];\n        }\n        else\n        {\n            video->QP_CHR = QP;     /* ANNEX_T */\n        }\n#endif\n        /* remove any stuffing bits */\n        BitstreamShowBits16(stream, stuffing_length, &code);\n        while (code == 1)\n        {\n            PV_BitstreamFlushBits(stream, stuffing_length);\n            BitstreamShowBits16(stream, stuffing_length, &code);\n        }\n\n        do\n        {\n            /* we need video->mbnum in lower level functions */\n            video->mbnum = mbnum;\n            video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);\n            video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n            /* assign slice number for each macroblocks */\n            video->sliceNo[mbnum] = (uint8) slice_counter;\n\n            /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */\n            /* We have to discard stuffed MB header */\n            status = GetMBheader(video, &QP);\n\n            if (status != PV_SUCCESS)\n            {\n                VideoDecoderErrorDetected(video);\n                video->mbnum = mb_start;\n                movePointerTo(stream, (startPacket & -8));\n                break;\n            }\n\n            /* Store the QP value for later use in AC prediction */\n            QPMB[mbnum] = QP;\n\n            if (Mode[mbnum] != MODE_SKIPPED)\n            {\n                /* decode the DCT coeficients for the MB */\n                status = GetMBData(video);\n                if (status != PV_SUCCESS)\n                {\n                    VideoDecoderErrorDetected(video);\n                    video->mbnum = mb_start;\n                    movePointerTo(stream, (startPacket & -8));\n                    break;\n         
       }\n            }\n            else /* MODE_SKIPPED */\n            {\n                SkippedMBMotionComp(video); /*  08/04/05 */\n            }\n            // Motion compensation and put video->mblock->pred_block\n            mbnum++;\n\n            /* remove any stuffing bits */\n            BitstreamShowBits16(stream, stuffing_length, &code);\n            while (code == 1)\n            {\n                PV_BitstreamFlushBits(stream, stuffing_length);\n                BitstreamShowBits16(stream, stuffing_length, &code);\n            }\n\n            /* have we reached the end of the video packet or vop? */\n            if (shortVideoHeader)\n            {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n                if (!video->slice_structure)\n                {\n#endif\n                    if (mbnum >= (int)(video->mbnum_row + 1)*video->nMBinGOB)   /*  10/11/01 */\n                    {\n                        if (mbnum >= nTotalMB) return PV_SUCCESS;\n                        status = BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);\n\n                        if (tmpvar == GOB_RESYNC_MARKER)\n                        {\n                            break;\n                        }\n                        else\n                        {\n                            status = PV_BitstreamShowBitsByteAlign(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);\n                            if (tmpvar == GOB_RESYNC_MARKER) break;\n                        }\n                    }\n#ifdef PV_ANNEX_IJKT_SUPPORT\n                }\n                else\n                {\n\n                    if (mbnum >= nTotalMB)  /* in case no valid stuffing  06/23/01 */\n                    {\n                        valid_stuffing = validStuffing_h263(stream);\n                        if (valid_stuffing == 0)\n                        {\n                            VideoDecoderErrorDetected(video);\n                            ConcealPacket(video, mb_start, nTotalMB, 
slice_counter);\n                        }\n                        return PV_SUCCESS;\n                    }\n                    /* ANNEX_K */\n                    PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar);\n                    if (tmpvar == RESYNC_MARKER)\n                    {\n                        valid_stuffing = validStuffing_h263(stream);\n                        if (valid_stuffing)\n                            break; /*  06/21/01 */\n                    }\n\n                }\n#endif\n            }\n            else\n            {\n                if (mbnum >= nTotalMB)  /* in case no valid stuffing  06/23/01 */\n                {\n                    /*  11/01/2002 if we are at the end of the frame and there is some garbage data\n                    at the end of the frame (i.e. no next startcode) break if the stuffing is valid */\n                    valid_stuffing = validStuffing(stream);\n                    if (valid_stuffing == 0)\n                    {\n                        /* end 11/01/2002 */\n                        VideoDecoderErrorDetected(video);\n                        ConcealPacket(video, mb_start, nTotalMB, slice_counter);\n                    }\n                    PV_BitstreamByteAlign(stream);\n                    return PV_SUCCESS;\n                }\n\n                status = PV_BitstreamShowBitsByteAlign(stream, 23, &tmpvar); /* this call is valid for f_code < 8 */\n                long_zero_bits = !tmpvar;\n\n                if ((tmpvar >> (23 - resync_marker_length)) == RESYNC_MARKER || long_zero_bits)\n                {\n                    valid_stuffing = validStuffing(stream);\n                    if (valid_stuffing)\n                        break; /*  06/21/01 */\n                }\n\n            }\n        }\n        while (TRUE);\n\n        if (shortVideoHeader)\n        { /* We need to check newgob to refresh quantizer */\n#ifdef PV_ANNEX_IJKT_SUPPORT\n            if 
(!video->slice_structure)\n            {\n#endif\n                while ((status = PV_GobHeader(video)) == PV_FAIL)\n                {\n                    if ((status = quickSearchGOBHeader(stream)) != PV_SUCCESS)\n                    {\n                        break;\n                    }\n                }\n\n                mbnum = currVop->gobNumber * video->nMBinGOB;\n#ifdef PV_ANNEX_IJKT_SUPPORT\n            }\n            else\n            {\n                while ((status = PV_H263SliceHeader(video, &mbnum)) == PV_FAIL)\n                {\n                    if ((status = quickSearchH263SliceHeader(stream)) != PV_SUCCESS)\n                    {\n                        break;\n                    }\n                }\n            }\n\n#endif\n        }\n        else\n        {\n            while ((status = PV_ReadVideoPacketHeader(video, &mbnum)) == PV_FAIL)\n            {\n                if ((status = quickSearchVideoPacketHeader(stream, resync_marker_length)) != PV_SUCCESS)\n                {\n                    break;\n                }\n            }\n        }\n\n        if (status == PV_END_OF_VOP)\n        {\n            mbnum = nTotalMB;\n        }\n\n        if (mbnum > video->mbnum + 1)\n        {\n            ConcealPacket(video, video->mbnum, mbnum, slice_counter);\n        }\n        QP = video->currVop->quantizer;\n        slice_counter++;\n        if (mbnum >= nTotalMB) break;\n\n    }\n    while (TRUE);\n    return PV_SUCCESS;\n}\n\n\n/* ============================================================================ */\n/*  Function : GetMBHeader()                                                    */\n/*  Purpose  : Decode MB header, not_coded, mcbpc, ac_pred_flag, cbpy, dquant.  
*/\n/*  In/out   :                                                                  */\n/*  Return   :                                                                  */\n/*  Modified :                                                                  */\n/*                                                                              */\n/*      3/29/00 : Changed the returned value and optimized the code.    */\n/*      4/01/01 : new ACDC prediction structure                         */\n/* ============================================================================ */\nPV_STATUS GetMBheader(VideoDecData *video, int16 *QP)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int mbnum = video->mbnum;\n    uint8 *Mode = video->headerInfo.Mode;\n    int x_pos = video->mbnum_col;\n    typeDCStore *DC = video->predDC + mbnum;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    const static int16  DQ_tab[4] = { -1, -2, 1, 2};\n\n    int CBPY, CBPC;\n    int MBtype, VopType;\n    int MCBPC;\n    uint DQUANT;\n    int comp;\n    Bool mb_coded;\n\n    VopType = video->currVop->predictionType;\n    mb_coded = ((VopType == I_VOP) ? 
TRUE : !BitstreamRead1Bits_INLINE(stream));\n\n    if (!mb_coded)\n    {\n        /* skipped macroblock */\n        Mode[mbnum] = MODE_SKIPPED;\n        //oscl_memset(DCAC_row, 0, sizeof(typeDCACStore));   /*  SKIPPED_ACDC */\n        //oscl_memset(DCAC_col, 0, sizeof(typeDCACStore));\n        ZERO_OUT_64BYTES(DCAC_row);\n        ZERO_OUT_64BYTES(DCAC_col); /*  08/12/05 */\n\n        for (comp = 0; comp < 6; comp++)\n        {\n            (*DC)[comp] = mid_gray;\n        }\n    }\n    else\n    {\n        /* coded macroblock */\n        if (VopType == I_VOP)\n        {\n            MCBPC = PV_VlcDecMCBPC_com_intra(stream);\n        }\n        else\n        {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n            if (!video->deblocking)\n            {\n                MCBPC = PV_VlcDecMCBPC_com_inter(stream);\n            }\n            else\n            {\n                MCBPC = PV_VlcDecMCBPC_com_inter_H263(stream);\n            }\n#else\n            MCBPC = PV_VlcDecMCBPC_com_inter(stream);\n#endif\n        }\n\n        if (VLC_ERROR_DETECTED(MCBPC))\n        {\n            return PV_FAIL;\n        }\n\n        Mode[mbnum] = (uint8)(MBtype = MBtype_mode[MCBPC & 7]);\n        CBPC = (MCBPC >> 4) & 3;\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (MBtype & INTRA_MASK)\n        {\n            if (!video->shortVideoHeader)\n            {\n                video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits(stream);\n            }\n            else\n            {\n                if (video->advanced_INTRA)\n                {\n                    if (!BitstreamRead1Bits(stream))\n                    {\n                        video->acPredFlag[mbnum] = 0;\n                    }\n                    else\n                    {\n                        video->acPredFlag[mbnum] = 1;\n                        if (BitstreamRead1Bits(stream))\n                        {\n                            video->mblock->direction = 0;\n                        }\n                        else\n   
                     {\n                            video->mblock->direction = 1;\n                        }\n                    }\n                }\n                else\n                {\n                    video->acPredFlag[mbnum] = 0;\n                }\n            }\n        }\n#else\n        if ((MBtype & INTRA_MASK) && !video->shortVideoHeader)\n        {\n            video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits_INLINE(stream);\n        }\n        else\n        {\n            video->acPredFlag[mbnum] = 0;\n        }\n#endif\n        CBPY = PV_VlcDecCBPY(stream, MBtype & INTRA_MASK); /* INTRA || INTRA_Q */\n        if (CBPY < 0)\n        {\n            return PV_FAIL;\n        }\n\n        // GW 04/23/99\n        video->headerInfo.CBP[mbnum] = (uint8)(CBPY << 2 | (CBPC & 3));\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (MBtype & Q_MASK)\n        {\n            if (!video->modified_quant)\n            {\n                DQUANT = BitstreamReadBits16(stream, 2);\n                *QP += DQ_tab[DQUANT];\n\n                if (*QP < 1) *QP = 1;\n                else if (*QP > 31) *QP = 31;\n                video->QP_CHR = *QP;  /* ANNEX_T */\n            }\n            else\n            {\n                if (BitstreamRead1Bits(stream))\n                {\n                    if (BitstreamRead1Bits(stream))\n                    {\n                        *QP += DQ_tab_Annex_T_11[*QP];\n                    }\n                    else\n                    {\n                        *QP += DQ_tab_Annex_T_10[*QP];\n                    }\n                    if (*QP < 1) *QP = 1;\n                    else if (*QP > 31) *QP = 31;\n                }\n                else\n                {\n                    *QP = (int16)BitstreamReadBits16(stream, 5);\n                }\n                video->QP_CHR =  MQ_chroma_QP_table[*QP];\n            }\n        }\n#else\n        if (MBtype & Q_MASK)\n        {\n            DQUANT = BitstreamReadBits16(stream, 2);\n   
         *QP += DQ_tab[DQUANT];\n\n            if (*QP < 1) *QP = 1;\n            else if (*QP > 31) *QP = 31;\n        }\n#endif\n    }\n    return PV_SUCCESS;\n}\n\n\n\n\n\n/***********************************************************CommentBegin******\n*       3/10/00  : initial modification to the\n*                new PV-Decoder Lib format.\n*       4/2/2000 : Cleanup and error-handling modification.  This\n*                   function has been divided into several sub-functions for\n*                   better coding style and maintainance reason.  I also\n*                   greatly shrunk the code size here.\n*       9/18/2000 : VlcDecode+Dequant optimization *\n*       4/01/2001 : new ACDC prediction structure\n*       3/29/2002 : removed GetIntraMB and GetInterMB\n***********************************************************CommentEnd********/\nPV_STATUS GetMBData(VideoDecData *video)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int mbnum = video->mbnum;\n    MacroBlock *mblock = video->mblock;\n    int16 *dataBlock;\n    PIXEL *c_comp;\n    uint mode = video->headerInfo.Mode[mbnum];\n    uint CBP = video->headerInfo.CBP[mbnum];\n    typeDCStore *DC = video->predDC + mbnum;\n    int intra_dc_vlc_thr = video->currVop->intraDCVlcThr;\n    int16 QP = video->QPMB[mbnum];\n    int16 QP_tmp = QP;\n    int width = video->width;\n    int  comp;\n    int  switched;\n    int ncoeffs[6] = {0, 0, 0, 0, 0, 0};\n    int *no_coeff = mblock->no_coeff;\n    int16 DC_coeff;\n    PV_STATUS status;\n\n#ifdef PV_POSTPROC_ON\n    /* post-processing */\n    uint8 *pp_mod[6];\n    int TotalMB = video->nTotalMB;\n    int MB_in_width = video->nMBPerRow;\n#endif\n    int y_pos = video->mbnum_row;\n    int x_pos = video->mbnum_col;\n    int32 offset = (int32)(y_pos << 4) * width + (x_pos << 4);\n\n    /* Decode each 8-by-8 blocks. comp 0 ~ 3 are luminance blocks, 4 ~ 5 */\n    /*  are chrominance blocks.   04/03/2000.                          
*/\n#ifdef PV_POSTPROC_ON\n    if (video->postFilterType != PV_NO_POST_PROC)\n    {\n        /** post-processing ***/\n        pp_mod[0] = video->pstprcTypCur + (y_pos << 1) * (MB_in_width << 1) + (x_pos << 1);\n        pp_mod[1] = pp_mod[0] + 1;\n        pp_mod[2] = pp_mod[0] + (MB_in_width << 1);\n        pp_mod[3] = pp_mod[2] + 1;\n        pp_mod[4] = video->pstprcTypCur + (TotalMB << 2) + mbnum;\n        pp_mod[5] = pp_mod[4] + TotalMB;\n    }\n#endif\n\n    /*  oscl_memset(mblock->block, 0, sizeof(typeMBStore));    Aug 9,2005 */\n\n    if (mode & INTRA_MASK) /* MODE_INTRA || MODE_INTRA_Q */\n    {\n        switched = 0;\n        if (intra_dc_vlc_thr)\n        {\n            if (video->usePrevQP)\n                QP_tmp = video->QPMB[mbnum-1];   /* running QP  04/26/01 */\n\n            switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11);\n        }\n\n        mblock->DCScalarLum = cal_dc_scaler(QP, LUMINANCE_DC_TYPE);   /*  3/01/01 */\n        mblock->DCScalarChr = cal_dc_scaler(QP, CHROMINANCE_DC_TYPE);\n\n        for (comp = 0; comp < 6; comp++)\n        {\n            dataBlock = mblock->block[comp];    /* 10/20/2000 */\n\n            if (video->shortVideoHeader)\n            {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n                if (!video->advanced_INTRA)\n                {\n#endif\n                    DC_coeff = (int16) BitstreamReadBits16_INLINE(stream, 8);\n\n                    if ((DC_coeff & 0x7f) == 0) /* 128 & 0  */\n                    {\n                        /* currently we will only signal FAIL for 128. 
We will ignore the 0 case  */\n                        if (DC_coeff == 128)\n                        {\n                            return PV_FAIL;\n                        }\n                        else\n                        {\n                            VideoDecoderErrorDetected(video);\n                        }\n                    }\n                    if (DC_coeff == 255)\n                    {\n                        DC_coeff = 128;\n                    }\n                    dataBlock[0] = (int16) DC_coeff;\n#ifdef PV_ANNEX_IJKT_SUPPORT\n                }\n#endif\n                ncoeffs[comp] = VlcDequantH263IntraBlock_SH(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n\n            }\n            else\n            {\n                if (switched == 0)\n                {\n                    status = PV_DecodePredictedIntraDC(comp, stream, &DC_coeff);\n                    if (status != PV_SUCCESS) return PV_FAIL;\n\n                    dataBlock[0] = (int16) DC_coeff;\n                }\n                ncoeffs[comp] = VlcDequantH263IntraBlock(video, comp,\n                                switched, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n            }\n\n            if (VLC_ERROR_DETECTED(ncoeffs[comp]))\n            {\n                if (switched)\n                    return PV_FAIL;\n                else\n                {\n                    ncoeffs[comp] = 1;\n                    oscl_memset((dataBlock + 1), 0, sizeof(int16)*63);\n                }\n            }\n            no_coeff[comp] = ncoeffs[comp];\n\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[comp] = (uint8) PostProcSemaphore(dataBlock);\n#endif\n        }\n        MBlockIDCT(video);\n    }\n    else      /* INTER modes */\n    {   /*  moved it here Aug 15, 2005 */\n        /* decode the motion vector (if there are any) */\n        status = PV_GetMBvectors(video, mode);\n        if (status 
!= PV_SUCCESS)\n        {\n            return status;\n        }\n\n\n        MBMotionComp(video, CBP);\n        c_comp  = video->currVop->yChan + offset;\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        for (comp = 0; comp < 4; comp++)\n        {\n            (*DC)[comp] = mid_gray;\n            if (CBP & (1 << (5 - comp)))\n            {\n                ncoeffs[comp] = VlcDequantH263InterBlock(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n                if (VLC_ERROR_DETECTED(ncoeffs[comp])) return PV_FAIL;\n\n                BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp],\n                          mblock->bitmapcol[comp], mblock->bitmaprow[comp]);\n\n#ifdef PV_POSTPROC_ON\n                /* for inter just test for ringing */\n                if (video->postFilterType != PV_NO_POST_PROC)\n                    *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 4 : 0);\n#endif\n            }\n            else\n            {\n                /* no IDCT for all zeros blocks  03/28/2002 */\n                /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n                if (video->postFilterType != PV_NO_POST_PROC)\n                    *pp_mod[comp] = 0;\n#endif\n            }\n        }\n\n        video->QPMB[mbnum] = video->QP_CHR;     /* ANNEX_T */\n\n\n\n        (*DC)[4] = mid_gray;\n        if (CBP & 2)\n        {\n            ncoeffs[4] = VlcDequantH263InterBlock(video, 4, mblock->bitmapcol[4], &mblock->bitmaprow[4]);\n            if (VLC_ERROR_DETECTED(ncoeffs[4])) return PV_FAIL;\n\n            BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4],\n                      mblock->bitmapcol[4], mblock->bitmaprow[4]);\n\n#ifdef PV_POSTPROC_ON\n            /* for inter just test for ringing */\n            if (video->postFilterType != PV_NO_POST_PROC)\n          
      *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 4 : 0);\n#endif\n        }\n        else\n        {\n            /* no IDCT for all zeros blocks  03/28/2002 */\n            /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[4] = 0;\n#endif\n        }\n        (*DC)[5] = mid_gray;\n        if (CBP & 1)\n        {\n            ncoeffs[5] = VlcDequantH263InterBlock(video, 5, mblock->bitmapcol[5], &mblock->bitmaprow[5]);\n            if (VLC_ERROR_DETECTED(ncoeffs[5])) return PV_FAIL;\n\n            BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5],\n                      mblock->bitmapcol[5], mblock->bitmaprow[5]);\n\n#ifdef PV_POSTPROC_ON\n            /* for inter just test for ringing */\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 4 : 0);\n#endif\n        }\n        else\n        {\n            /* no IDCT for all zeros blocks  03/28/2002 */\n            /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[5] = 0;\n#endif\n        }\n        video->QPMB[mbnum] = QP;  /* restore the QP values  ANNEX_T*/\n#else\n        for (comp = 0; comp < 4; comp++)\n        {\n            (*DC)[comp] = mid_gray;\n            if (CBP & (1 << (5 - comp)))\n            {\n                ncoeffs[comp] = VlcDequantH263InterBlock(video, comp, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n                if (VLC_ERROR_DETECTED(ncoeffs[comp])) return PV_FAIL;\n\n                BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp],\n                          mblock->bitmapcol[comp], mblock->bitmaprow[comp]);\n\n#ifdef PV_POSTPROC_ON\n            
    /* for inter just test for ringing */\n                if (video->postFilterType != PV_NO_POST_PROC)\n                    *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 4 : 0);\n#endif\n            }\n            else\n            {\n                /* no IDCT for all zeros blocks  03/28/2002 */\n                /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n                if (video->postFilterType != PV_NO_POST_PROC)\n                    *pp_mod[comp] = 0;\n#endif\n            }\n        }\n\n        (*DC)[4] = mid_gray;\n        if (CBP & 2)\n        {\n            ncoeffs[4] = VlcDequantH263InterBlock(video, 4, mblock->bitmapcol[4], &mblock->bitmaprow[4]);\n            if (VLC_ERROR_DETECTED(ncoeffs[4])) return PV_FAIL;\n\n            BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4],\n                      mblock->bitmapcol[4], mblock->bitmaprow[4]);\n\n#ifdef PV_POSTPROC_ON\n            /* for inter just test for ringing */\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 
4 : 0);\n#endif\n        }\n        else\n        {\n            /* no IDCT for all zeros blocks  03/28/2002 */\n            /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[4] = 0;\n#endif\n        }\n        (*DC)[5] = mid_gray;\n        if (CBP & 1)\n        {\n            ncoeffs[5] = VlcDequantH263InterBlock(video, 5, mblock->bitmapcol[5], &mblock->bitmaprow[5]);\n            if (VLC_ERROR_DETECTED(ncoeffs[5])) return PV_FAIL;\n\n            BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5],\n                      mblock->bitmapcol[5], mblock->bitmaprow[5]);\n\n#ifdef PV_POSTPROC_ON\n            /* for inter just test for ringing */\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 4 : 0);\n#endif\n        }\n        else\n        {\n            /* no IDCT for all zeros blocks  03/28/2002 */\n            /*              BlockIDCT();                */\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[5] = 0;\n#endif\n#endif  // PV_ANNEX_IJKT_SUPPORT\n\n\n\n\n\n\n    }\n\n    video->usePrevQP = 1;          /* should be set after decoding the first Coded  04/27/01 */\n    return PV_SUCCESS;\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/conceal.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\" /* video decoder function prototypes */\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"scaling.h\"\n\n/* ====================================================================== /\nFunction : ConcealTexture_I()\nDate     : 06/12/2001\nPurpose  : Conceal texture for I-partition\nIn/out   :\nReturn   :\nModified :\n/ ====================================================================== */\nvoid ConcealTexture_I(VideoDecData *video, int32 startFirstPartition, int mb_start, int mb_stop, int slice_counter)\n{\n    int mbnum;\n    BitstreamDecVideo *stream = video->bitstream;\n    int16 QP;\n    int intra_dc_vlc_thr = video->currVop->intraDCVlcThr;\n\n    movePointerTo(stream, startFirstPartition);\n\n    video->usePrevQP = 0;\n    for (mbnum = mb_start; mbnum < mb_stop; mbnum++)\n    {\n        video->mbnum = mbnum;\n        video->mbnum_row = PV_GET_ROW(mbnum, video->nMBPerRow);\n        video->mbnum_col = mbnum - video->mbnum_row * video->nMBPerRow;\n        video->sliceNo[mbnum] = (uint8) slice_counter;\n        QP = video->QPMB[mbnum];\n        PV_VlcDecMCBPC_com_intra(stream);\n        GetMBheaderDataPart_DQUANT_DC(video, &QP);\n\n        if 
(intra_dc_vlc_thr)\n        {\n            if (video->usePrevQP)\n                QP = video->QPMB[mbnum-1];\n            if (intra_dc_vlc_thr == 7 || QP >= intra_dc_vlc_thr*2 + 11)  /* if switched then conceal from previous frame  */\n            {\n                ConcealPacket(video, mbnum, mb_stop, slice_counter);\n                video->mbnum = mb_stop - 1;\n                video->mbnum_row = PV_GET_ROW(video->mbnum, video->nMBPerRow);\n                video->mbnum_col = video->mbnum - video->mbnum_row * video->nMBPerRow;\n                break;\n            }\n        }\n\n        video->headerInfo.CBP[mbnum] = 0;\n        video->acPredFlag[mbnum] = 0;\n        GetMBData_DataPart(video);\n        video->usePrevQP = 1;\n    }\n    return;\n}\n\n/* ====================================================================== /\nFunction : ConcealTexture_P()\nDate     : 05/16/2000\nPurpose  : Conceal texture for P-partition\nIn/out   :\nReturn   :\n/ ====================================================================== */\n\nvoid ConcealTexture_P(VideoDecData *video, int mb_start, int mb_stop, int slice_counter)\n{\n    int mbnum;\n\n    for (mbnum = mb_start; mbnum < mb_stop; mbnum++)\n    {\n        video->mbnum = mbnum;\n        video->mbnum_row = PV_GET_ROW(mbnum, video->nMBPerRow);\n        video->mbnum_col = mbnum - video->mbnum_row * video->nMBPerRow;\n        video->sliceNo[mbnum] = (uint8) slice_counter;\n        oscl_memset(video->mblock->block, 0, sizeof(typeMBStore));\n        /*  to get rid of dark region caused by INTRA blocks */\n        /* 05/19/2000 */\n        if (video->headerInfo.Mode[mbnum] & INTER_MASK)\n        {\n            MBMotionComp(video, 0);\n        }\n        else\n        {\n            video->headerInfo.Mode[mbnum] = MODE_SKIPPED;\n            SkippedMBMotionComp(video);\n        }\n    }\n\n    return;\n}\n\n/***************************************************************\nFunction:   ConcealPacket\nPurpose :   Conceal motion and 
texture of a packet by direct\ncopying from previous frame.\nReturned:   void\nModified:\n*************************************************************/\nvoid ConcealPacket(VideoDecData *video,\n                   int mb_start,\n                   int mb_stop,\n                   int slice_counter)\n{\n    int i;\n    for (i = mb_start; i < mb_stop; i++)\n    {\n        CopyVopMB(video->currVop, video->concealFrame, i, video->width, video->height);\n        video->sliceNo[i] = (uint8) slice_counter;\n        video->headerInfo.Mode[i] = MODE_SKIPPED;\n    }\n\n    return;\n}\n\n/****************************************************************************\nFunction:   CopyVopMB\nPurpose :   Fill a macroblock with previous Vop.\nReturned    :   void\nModified:   6/04/2001 rewrote the function\n            copies from concealFrame\n****************************************************************************/\nvoid CopyVopMB(Vop *curr, uint8 *prevFrame, int mbnum, int width_Y, int height)\n{\n    int width_C = width_Y >> 1;\n    int row = MB_SIZE;\n    uint8              *y1, *y2, *u1, *u2, *v1, *v2;\n    int xpos, ypos, MB_in_width;\n    int32 lumstart, chrstart, size;\n\n    MB_in_width = (width_Y + 15) >> 4;\n    ypos = PV_GET_ROW(mbnum, MB_in_width);\n    xpos = mbnum - ypos * MB_in_width;\n    lumstart = (ypos << 4) * (int32)width_Y  + (xpos << 4);\n    chrstart = (ypos << 3) * (int32)width_C  + (xpos << 3);\n\n    size = (int32)height * width_Y;\n\n    y1 =  curr->yChan + lumstart;\n    u1 =  curr->uChan + chrstart;\n    v1 =  curr->vChan + chrstart;\n    y2 =  prevFrame + lumstart;\n    u2 =  prevFrame + size + chrstart;\n    v2 =  prevFrame + size + (size >> 2) + chrstart;\n    while (row)\n    {\n        oscl_memcpy(y1, y2, MB_SIZE);\n        y1 += width_Y;\n        y2 += width_Y;\n        oscl_memcpy(y1, y2, MB_SIZE);\n        y1 += width_Y;\n        y2 += width_Y;\n        oscl_memcpy(y1, y2, MB_SIZE);\n        y1 += width_Y;\n        y2 += width_Y;\n        
oscl_memcpy(y1, y2, MB_SIZE);\n        y1 += width_Y;\n        y2 += width_Y;\n\n        oscl_memcpy(u1, u2, B_SIZE);\n        u1 += width_C;\n        u2 += width_C;\n        oscl_memcpy(u1, u2, B_SIZE);\n        u1 += width_C;\n        u2 += width_C;\n\n        oscl_memcpy(v1, v2, B_SIZE);\n        v1 += width_C;\n        v2 += width_C;\n        oscl_memcpy(v1, v2, B_SIZE);\n        v1 += width_C;\n        v2 += width_C;\n\n        row -= 4;\n    }\n    return;\n}               /* CopyVopMB */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/datapart_decode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"scaling.h\"\n#include \"mbtype_mode.h\"\n#include \"idct.h\"\n\n#define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT\n#include \"osclconfig_compiler_warnings.h\"\n/* ======================================================================== */\n/*  Function : DecodeFrameDataPartMode()                                    */\n/*  Purpose  : Decode a frame of MPEG4 bitstream in datapartitioning mode.  */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*                                                                          */\n/*      04/25/2000 : Rewrite the data partitioning path completely  */\n/*                           according to the pseudo codes in MPEG-4        */\n/*                           standard.                                      
*/\n/*  Modified : 09/18/2000 add fast VlcDecode+Dequant                    */\n/*             04/17/2001 cleanup                                       */\n/* ======================================================================== */\nPV_STATUS DecodeFrameDataPartMode(VideoDecData *video)\n{\n    PV_STATUS status;\n    Vop *currVop = video->currVop;\n    BitstreamDecVideo *stream = video->bitstream;\n\n    int nMBPerRow = video->nMBPerRow;\n\n    int vopType = currVop->predictionType;\n    int mbnum;\n    int nTotalMB = video->nTotalMB;\n    int slice_counter;\n    int resync_marker_length;\n\n    /* copy and pad to prev_Vop for INTER coding */\n    switch (vopType)\n    {\n        case I_VOP :\n//      oscl_memset(Mode, MODE_INTRA, sizeof(uint8)*nTotalMB);\n            resync_marker_length = 17;\n            break;\n        case P_VOP :\n            oscl_memset(video->motX, 0, sizeof(MOT)*4*nTotalMB);\n            oscl_memset(video->motY, 0, sizeof(MOT)*4*nTotalMB);\n//      oscl_memset(Mode, MODE_INTER, sizeof(uint8)*nTotalMB);\n            resync_marker_length = 16 + currVop->fcodeForward;\n            break;\n        default :\n            mp4dec_log(\"DecodeFrameDataPartMode(): Vop type not supported.\\n\");\n            return PV_FAIL;\n    }\n\n    /** Initialize sliceNo ***/\n    mbnum = slice_counter = 0;\n//  oscl_memset(video->sliceNo, 0, sizeof(uint8)*nTotalMB);\n\n    do\n    {\n        /* This section is equivalent to motion_shape_texture() */\n        /* in the MPEG-4 standard.            
04/13/2000      */\n        video->mbnum = mbnum;\n        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */\n        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n\n        switch (vopType)\n        {\n            case I_VOP :\n                status = DecodeDataPart_I_VideoPacket(video, slice_counter);\n                break;\n\n            case P_VOP :\n                status = DecodeDataPart_P_VideoPacket(video, slice_counter);\n                break;\n\n            default :\n                mp4dec_log(\"DecodeFrameDataPartMode(): Vop type not supported.\\n\");\n                return PV_FAIL;\n        }\n\n        while ((status = PV_ReadVideoPacketHeader(video, &mbnum)) == PV_FAIL)\n        {\n            if ((status = quickSearchVideoPacketHeader(stream, resync_marker_length)) != PV_SUCCESS)\n            {\n                break;\n            }\n        }\n\n        if (status == PV_END_OF_VOP)\n        {\n            mbnum = nTotalMB;\n        }\n\n        if (mbnum > video->mbnum + 1)\n        {\n            ConcealPacket(video, video->mbnum, mbnum, slice_counter);\n        }\n        slice_counter++;\n        if (mbnum >= nTotalMB)\n        {\n            break;\n        }\n\n\n    }\n    while (TRUE);\n\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : DecodeDataPart_I_VideoPacket()                               */\n/*  Date     : 04/25/2000                                                   */\n/*  Purpose  : Decode Data Partitioned Mode Video Packet in I-VOP           */\n/*  In/out   :                                                              */\n/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.                  
*/\n/*  Modified : 09/18/2000 add fast VlcDecode+Dequant                    */\n/*             04/01/2001 fixed MB_stuffing, removed unnecessary code   */\n/* ======================================================================== */\nPV_STATUS DecodeDataPart_I_VideoPacket(VideoDecData *video, int slice_counter)\n{\n    PV_STATUS status;\n    uint8 *Mode = video->headerInfo.Mode;\n    BitstreamDecVideo *stream = video->bitstream;\n    int  nTotalMB = video->nTotalMB;\n    int  mbnum, mb_start, mb_end;\n    int16 QP, *QPMB = video->QPMB;\n    int  MBtype, MCBPC, CBPY;\n    uint32 tmpvar;\n    uint code;\n    int nMBPerRow = video->nMBPerRow;\n    Bool valid_stuffing;\n    int32 startSecondPart, startFirstPart = getPointer(stream);\n\n    /* decode the first partition */\n    QP = video->currVop->quantizer;\n    mb_start = mbnum = video->mbnum;\n    video->usePrevQP = 0;         /*  04/27/01 */\n\n\n    BitstreamShowBits16(stream, 9, &code);\n    while (code == 1)\n    {\n        PV_BitstreamFlushBits(stream, 9);\n        BitstreamShowBits16(stream, 9, &code);\n    }\n\n    do\n    {\n        /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */\n        MCBPC = PV_VlcDecMCBPC_com_intra(stream);\n\n        if (!VLC_ERROR_DETECTED(MCBPC))\n        {\n            Mode[mbnum] = (uint8)(MBtype = MBtype_mode[MCBPC & 7]);\n            video->headerInfo.CBP[mbnum] = (uint8)((MCBPC >> 4) & 3);\n            status = GetMBheaderDataPart_DQUANT_DC(video, &QP);\n            video->usePrevQP = 1;        /* set it after the first coded MB      04/27/01 */\n        }\n        else\n        {\n            /* Report the error to the application.   
06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            video->mbnum = mb_start;\n            movePointerTo(stream, startFirstPart);\n            return PV_FAIL;\n        }\n\n        video->sliceNo[mbnum] = (uint8) slice_counter;\n        QPMB[mbnum] = QP;\n        video->mbnum = ++mbnum;\n\n        BitstreamShowBits16(stream, 9, &code);\n        while (code == 1)\n        {\n            PV_BitstreamFlushBits(stream, 9);\n            BitstreamShowBits16(stream, 9, &code);\n        }\n        /* have we reached the end of the video packet or vop? */\n        status = BitstreamShowBits32(stream, DC_MARKER_LENGTH, &tmpvar);\n\n    }\n    while (tmpvar != DC_MARKER && video->mbnum < nTotalMB);\n\n    if (tmpvar == DC_MARKER)\n    {\n        PV_BitstreamFlushBits(stream, DC_MARKER_LENGTH);\n    }\n    else\n    {\n        status = quickSearchDCM(stream);\n        if (status == PV_SUCCESS)\n        {\n            /* only way you can end up being here is in the last packet,and there is stuffing at\n            the end of the first partition */\n            PV_BitstreamFlushBits(stream, DC_MARKER_LENGTH);\n        }\n        else\n        {\n            /* Report the error to the application.   
06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            movePointerTo(stream, startFirstPart);\n            video->mbnum = mb_start;\n            /* concealment will be taken care of in the upper layer */\n            return PV_FAIL;\n        }\n    }\n\n    /* decode the second partition */\n    startSecondPart = getPointer(stream);\n\n    mb_end = video->mbnum;\n\n    for (mbnum = mb_start; mbnum < mb_end; mbnum++)\n    {\n        MBtype = Mode[mbnum];\n        /* No skipped mode in I-packets  3/1/2001    */\n        video->mbnum = mbnum;\n\n        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */\n        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n        /* there is always acdcpred in DataPart mode  04/10/01 */\n        video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits(stream);\n\n        CBPY = PV_VlcDecCBPY(stream, MBtype & INTRA_MASK); /* MODE_INTRA || MODE_INTRA_Q */\n        if (CBPY < 0)\n        {\n            /* Report the error to the application.   
06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            movePointerTo(stream, startSecondPart); /*  */\n            /* Conceal packet,  05/15/2000 */\n            ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);\n            return PV_FAIL;\n        }\n\n        video->headerInfo.CBP[mbnum] |= (uint8)(CBPY << 2);\n    }\n\n    video->usePrevQP = 0;\n\n    for (mbnum = mb_start; mbnum < mb_end; mbnum++)\n    {\n        video->mbnum = mbnum;\n\n        video->mbnum_row = PV_GET_ROW(mbnum , nMBPerRow);  /*  This is needed if nbnum is read from the packet header */\n        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n        /* No skipped mode in I-packets  3/1/2001    */\n        /* decode the DCT coeficients for the MB */\n        status = GetMBData_DataPart(video);\n        if (status != PV_SUCCESS)\n        {\n            /* Report the error to the application.   06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            movePointerTo(stream, startSecondPart); /*  */\n            /* Conceal packet,  05/15/2000 */\n            ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);\n            return status;\n        }\n        video->usePrevQP = 1;           /*  04/27/01 should be set after decoding first MB */\n    }\n\n    valid_stuffing = validStuffing(stream);\n    if (!valid_stuffing)\n    {\n        VideoDecoderErrorDetected(video);\n        movePointerTo(stream, startSecondPart);\n        ConcealTexture_I(video, startFirstPart, mb_start, mb_end, slice_counter);\n        return PV_FAIL;\n    }\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : DecodeDataPart_P_VideoPacket()                               */\n/*  Date     : 04/25/2000                                                   */\n/*  Purpose  : Decode Data Partitioned Mode Video Packet in P-VOP           */\n/*  In/out   :                  
                                            */\n/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.                  */\n/*  Modified :   09/18/2000,  fast VlcDecode+Dequant                        */\n/*              04/13/2001,  fixed MB_stuffing, new ACDC pred structure,  */\n/*                              cleanup                                     */\n/*              08/07/2001,  remove MBzero                              */\n/* ======================================================================== */\nPV_STATUS DecodeDataPart_P_VideoPacket(VideoDecData *video, int slice_counter)\n{\n    PV_STATUS status;\n    uint8 *Mode = video->headerInfo.Mode;\n    BitstreamDecVideo *stream = video->bitstream;\n    int nTotalMB = video->nTotalMB;\n    int mbnum, mb_start, mb_end;\n    int16 QP, *QPMB = video->QPMB;\n    int MBtype, CBPY;\n    Bool valid_stuffing;\n    int intra_MB;\n    uint32 tmpvar;\n    uint code;\n    int32  startFirstPart, startSecondPart;\n    int nMBPerRow = video->nMBPerRow;\n    uint8 *pbyte;\n    /* decode the first partition */\n    startFirstPart = getPointer(stream);\n    mb_start = video->mbnum;\n    video->usePrevQP = 0;            /*  04/27/01 */\n\n    BitstreamShowBits16(stream, 10, &code);\n    while (code == 1)\n    {\n        PV_BitstreamFlushBits(stream, 10);\n        BitstreamShowBits16(stream, 10, &code);\n    }\n\n    do\n    {\n        /* decode COD, MCBPC, ACpred_flag, CPBY and DQUANT */\n        /* We have to discard stuffed MB header */\n\n        status = GetMBheaderDataPart_P(video);\n\n        if (status != PV_SUCCESS)\n        {\n            /* Report the error to the application.   06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            movePointerTo(stream, startFirstPart);\n            video->mbnum = mb_start;\n            return PV_FAIL;\n        }\n\n        /* we must update slice_counter before motion vector decoding.   
*/\n        video->sliceNo[video->mbnum] = (uint8) slice_counter;\n\n        if (Mode[video->mbnum] & INTER_MASK) /* INTER || INTER_Q || INTER_4V */\n        {\n            /* decode the motion vector (if there are any) */\n            status = PV_GetMBvectors(video, Mode[video->mbnum]);\n            if (status != PV_SUCCESS)\n            {\n                /* Report the error to the application.   06/20/2000 */\n                VideoDecoderErrorDetected(video);\n                movePointerTo(stream, startFirstPart);\n                video->mbnum = mb_start;\n                return PV_FAIL;\n            }\n        }\n        video->mbnum++;\n\n        video->mbnum_row = PV_GET_ROW(video->mbnum, nMBPerRow);   /*  This is needed if mbnum is read from the packet header */\n        video->mbnum_col = video->mbnum - video->mbnum_row * nMBPerRow;\n\n        BitstreamShowBits16(stream, 10, &code);\n        while (code == 1)\n        {\n            PV_BitstreamFlushBits(stream, 10);\n            BitstreamShowBits16(stream, 10, &code);\n        }\n        /* have we reached the end of the video packet or vop? */\n        status = BitstreamShowBits32(stream, MOTION_MARKER_COMB_LENGTH, &tmpvar);\n        /*      if (status != PV_SUCCESS && status != PV_END_OF_BUFFER) return status;  */\n    }\n    while (tmpvar != MOTION_MARKER_COMB && video->mbnum < nTotalMB);\n\n    if (tmpvar == MOTION_MARKER_COMB)\n    {\n        PV_BitstreamFlushBits(stream, MOTION_MARKER_COMB_LENGTH);\n    }\n    else\n    {\n        status = quickSearchMotionMarker(stream);\n        if (status == PV_SUCCESS)\n        {\n            /* only way you can end up being here is in the last packet,and there is stuffing at\n            the end of the first partition */\n            PV_BitstreamFlushBits(stream, MOTION_MARKER_COMB_LENGTH);\n        }\n        else\n        {\n            /* Report the error to the application.   
06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            movePointerTo(stream, startFirstPart);\n            video->mbnum = mb_start;\n            /* concealment will be taken care of in the upper layer  */\n            return PV_FAIL;\n        }\n    }\n\n    /* decode the second partition */\n    startSecondPart = getPointer(stream);\n    QP = video->currVop->quantizer;\n\n    mb_end = video->mbnum;\n\n    for (mbnum = mb_start; mbnum < mb_end; mbnum++)\n    {\n        MBtype = Mode[mbnum];\n\n        if (MBtype == MODE_SKIPPED)\n        {\n            QPMB[mbnum] = QP; /*  03/01/01 */\n            continue;\n        }\n        intra_MB = (MBtype & INTRA_MASK); /* (MBtype == MODE_INTRA || MBtype == MODE_INTRA_Q) */\n        video->mbnum = mbnum;\n        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);   /*  This is needed if nbnum is read from the packet header */\n        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n\n        /* there is always acdcprediction in DataPart mode    04/10/01 */\n        if (intra_MB)\n        {\n            video->acPredFlag[mbnum] = (uint8) BitstreamRead1Bits_INLINE(stream);\n        }\n\n        CBPY = PV_VlcDecCBPY(stream, intra_MB);\n        if (CBPY < 0)\n        {\n            /* Report the error to the application.   
06/20/2000 */\n            VideoDecoderErrorDetected(video);\n            /* Conceal second partition,  5/15/2000 */\n            movePointerTo(stream, startSecondPart);\n            ConcealTexture_P(video, mb_start, mb_end, slice_counter);\n            return PV_FAIL;\n        }\n\n        video->headerInfo.CBP[mbnum] |= (uint8)(CBPY << 2);\n        if (intra_MB || MBtype == MODE_INTER_Q)                     /*  04/26/01 */\n        {\n            status = GetMBheaderDataPart_DQUANT_DC(video, &QP);\n            if (status != PV_SUCCESS) return status;\n        }\n        video->usePrevQP = 1;        /*  04/27/01 */\n        QPMB[mbnum] = QP;\n    }\n\n    video->usePrevQP = 0;  /*  04/27/01 */\n\n    for (mbnum = mb_start; mbnum < mb_end; mbnum++)\n    {\n        video->mbnum = mbnum;\n        video->mbnum_row = PV_GET_ROW(mbnum, nMBPerRow);  /*  This is needed if nbnum is read from the packet header */\n        video->mbnum_col = mbnum - video->mbnum_row * nMBPerRow;\n\n\n        if (Mode[mbnum] != MODE_SKIPPED)\n        {\n            /* decode the DCT coeficients for the MB */\n            status = GetMBData_DataPart(video);\n            if (status != PV_SUCCESS)\n            {\n                /* Report the error to the application.   
06/20/2000 */\n                VideoDecoderErrorDetected(video);\n\n                /* Conceal second partition,  5/15/2000 */\n                movePointerTo(stream, startSecondPart);\n                ConcealTexture_P(video, mb_start, mb_end, slice_counter);\n                return status;\n            }\n            video->usePrevQP = 1;  /*  04/27/01 */\n        }\n        else\n        {   // SKIPPED\n\n            /* Motion compensation and put it to video->mblock->pred_block */\n            SkippedMBMotionComp(video);\n\n            //oscl_memset(video->predDCAC_row + video->mbnum_col, 0, sizeof(typeDCACStore)); /*  SKIPPED_ACDC */\n            //oscl_memset(video->predDCAC_col, 0, sizeof(typeDCACStore));\n            /*  08/08/2005 */\n            pbyte = (uint8*)(video->predDCAC_row + video->mbnum_col);\n            ZERO_OUT_64BYTES(pbyte);\n            pbyte = (uint8*)(video->predDCAC_col);\n            ZERO_OUT_64BYTES(pbyte);\n\n        }\n    }\n\n    valid_stuffing = validStuffing(stream);   /*  */\n    if (!valid_stuffing)\n    {\n        VideoDecoderErrorDetected(video);\n        movePointerTo(stream, startSecondPart); /*  */\n        ConcealTexture_P(video, mb_start, mb_end, slice_counter);\n\n        return PV_FAIL;\n    }\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : GetMBheaderDataPart_DQUANT_DC()                              */\n/*  Date     : 04/26/2000                                                   */\n/*  Purpose  : Decode DQUANT and DC in Data Partitioned Mode for both       */\n/*             I-VOP and P-VOP.                                             */\n/*  In/out   :                                                              */\n/*  Return   : PV_SUCCESS if successed, PV_FAIL if failed.                  
*/\n/*  Modified : 02/13/2001 new ACDC prediction structure,        */\n/*                                       cleanup                            */\n/* ======================================================================== */\nPV_STATUS GetMBheaderDataPart_DQUANT_DC(VideoDecData *video, int16 *QP)\n{\n    PV_STATUS status = PV_SUCCESS;\n    BitstreamDecVideo *stream = video->bitstream;\n    int mbnum = video->mbnum;\n    int intra_dc_vlc_thr = video->currVop->intraDCVlcThr;\n    uint8 *Mode = video->headerInfo.Mode;\n    int  MBtype = Mode[mbnum];\n    typeDCStore *DC = video->predDC + mbnum;\n    int  comp;\n    Bool switched;\n    uint  DQUANT;\n    int16 QP_tmp;\n\n    const static int  DQ_tab[4] = { -1, -2, 1, 2};\n\n    if (MBtype & Q_MASK)             /* INTRA_Q || INTER_Q */\n    {\n        DQUANT = BitstreamReadBits16(stream, 2);\n        *QP += DQ_tab[DQUANT];\n\n        if (*QP < 1) *QP = 1;\n        else if (*QP > 31) *QP = 31;\n    }\n    if (MBtype & INTRA_MASK)  /* INTRA || INTRA_Q */ /* no switch, code DC separately */\n    {\n        QP_tmp = *QP;                      /* running QP  04/26/01*/\n        switched = 0;\n        if (intra_dc_vlc_thr)                 /*  04/27/01 */\n        {\n            if (video->usePrevQP)\n                QP_tmp = video->QPMB[mbnum-1];\n            switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11);\n        }\n        if (!switched)\n        {\n            for (comp = 0; comp < 6; comp++)\n            {\n                status = PV_DecodePredictedIntraDC(comp, stream, (*DC + comp));   /*  03/01/01 */\n                if (status != PV_SUCCESS) return PV_FAIL;\n            }\n        }\n        else\n        {\n            for (comp = 0; comp < 6; comp++)\n            {\n                (*DC)[comp] = 0;   /*  04/26/01 needed for switched case*/\n            }\n        }\n    }\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*     
  04/25/2000 : Initial modification to the new PV Lib format.\n*       04/17/2001 : new ACDC pred structure\n***********************************************************CommentEnd********/\nPV_STATUS GetMBheaderDataPart_P(VideoDecData *video)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int mbnum = video->mbnum;\n    uint8 *Mode = video->headerInfo.Mode;\n    typeDCStore *DC = video->predDC + mbnum;\n    uint no_dct_flag;\n    int comp;\n    int MCBPC;\n\n    no_dct_flag = BitstreamRead1Bits_INLINE(stream);\n\n    if (no_dct_flag)\n    {\n        /* skipped macroblock */\n        Mode[mbnum] = MODE_SKIPPED;\n\n        for (comp = 0; comp < 6; comp++)\n        {\n            (*DC)[comp] = mid_gray;\n            /*  ACDC REMOVE AC coefs are set in DecodeDataPart_P */\n        }\n    }\n    else\n    {\n        /* coded macroblock */\n        MCBPC = PV_VlcDecMCBPC_com_inter(stream);\n\n        if (VLC_ERROR_DETECTED(MCBPC))\n        {\n            return PV_FAIL;\n        }\n\n        Mode[mbnum] = (uint8)MBtype_mode[MCBPC & 7];\n        video->headerInfo.CBP[mbnum] = (uint8)((MCBPC >> 4) & 3);\n    }\n\n    return PV_SUCCESS;\n}\n\n\n/***********************************************************CommentBegin******\n*       04/17/01  new ACDC pred structure, reorganized code, cleanup\n***********************************************************CommentEnd********/\nPV_STATUS GetMBData_DataPart(VideoDecData *video)\n{\n    int mbnum = video->mbnum;\n    int16 *dataBlock;\n    MacroBlock *mblock = video->mblock;\n    int QP = video->QPMB[mbnum];\n    int32 offset;\n    PIXEL *c_comp;\n    int width = video->width;\n    int intra_dc_vlc_thr = video->currVop->intraDCVlcThr;\n    uint CBP = video->headerInfo.CBP[mbnum];\n    uint8 mode = video->headerInfo.Mode[mbnum];\n    int x_pos = video->mbnum_col;\n    typeDCStore *DC = video->predDC + mbnum;\n    int  ncoeffs[6], *no_coeff = mblock->no_coeff;\n    int  comp;\n    Bool  switched;\n    int QP_tmp = QP;\n\n    
int y_pos = video->mbnum_row;\n#ifdef PV_POSTPROC_ON\n    uint8 *pp_mod[6];\n    int TotalMB = video->nTotalMB;\n    int MB_in_width = video->nMBPerRow;\n#endif\n\n\n\n    /*****\n    *     Decoding of the 6 blocks (depending on transparent pattern)\n    *****/\n#ifdef PV_POSTPROC_ON\n    if (video->postFilterType != PV_NO_POST_PROC)\n    {\n        /** post-processing ***/\n        pp_mod[0] = video->pstprcTypCur + (y_pos << 1) * (MB_in_width << 1) + (x_pos << 1);\n        pp_mod[1] = pp_mod[0] + 1;\n        pp_mod[2] = pp_mod[0] + (MB_in_width << 1);\n        pp_mod[3] = pp_mod[2] + 1;\n        pp_mod[4] = video->pstprcTypCur + (TotalMB << 2) + mbnum;\n        pp_mod[5] = pp_mod[4] + TotalMB;\n    }\n#endif\n\n    /*  oscl_memset(mblock->block, 0, sizeof(typeMBStore));    Aug 9,2005 */\n\n    if (mode & INTRA_MASK) /* MODE_INTRA || mode == MODE_INTRA_Q */\n    {\n        switched = 0;\n        if (intra_dc_vlc_thr)\n        {\n            if (video->usePrevQP)\n                QP_tmp = video->QPMB[mbnum-1];   /* running QP  04/26/01 */\n\n            switched = (intra_dc_vlc_thr == 7 || QP_tmp >= intra_dc_vlc_thr * 2 + 11);\n        }\n\n        mblock->DCScalarLum = cal_dc_scaler(QP, LUMINANCE_DC_TYPE);     /*   ACDC 03/01/01 */\n        mblock->DCScalarChr = cal_dc_scaler(QP, CHROMINANCE_DC_TYPE);\n\n        for (comp = 0; comp < 6; comp++)\n        {\n            dataBlock = mblock->block[comp];    /*, 10/20/2000 */\n\n            dataBlock[0] = (*DC)[comp];\n\n            ncoeffs[comp] = VlcDequantH263IntraBlock(video, comp,\n                            switched, mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n\n            if (VLC_ERROR_DETECTED(ncoeffs[comp]))         /*  */\n            {\n                if (switched)\n                    return PV_FAIL;\n                else\n                {\n                    ncoeffs[comp] = 1;\n                    oscl_memset((dataBlock + 1), 0, sizeof(int16)*63);\n                }\n            }\n            
no_coeff[comp] = ncoeffs[comp];\n            /*  modified to new semaphore for post-proc */\n            // Future work:: can be combined in the dequant function\n            // @todo Deblocking Semaphore for INTRA block\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[comp] = (uint8) PostProcSemaphore(dataBlock);\n#endif\n        }\n        MBlockIDCT(video);\n    }\n    else /* MODE INTER*/\n    {\n\n\n\n\n        MBMotionComp(video, CBP);\n        offset = (int32)(y_pos << 4) * width + (x_pos << 4);\n        c_comp  = video->currVop->yChan + offset;\n\n\n        for (comp = 0; comp < 4; comp++)\n        {\n            (*DC)[comp] = mid_gray;\n\n            if (CBP & (1 << (5 - comp)))\n            {\n                ncoeffs[comp] = VlcDequantH263InterBlock(video, comp,\n                                mblock->bitmapcol[comp], &mblock->bitmaprow[comp]);\n                if (VLC_ERROR_DETECTED(ncoeffs[comp]))\n                    return PV_FAIL;\n\n\n                BlockIDCT(c_comp + (comp&2)*(width << 2) + 8*(comp&1), mblock->pred_block + (comp&2)*64 + 8*(comp&1), mblock->block[comp], width, ncoeffs[comp],\n                          mblock->bitmapcol[comp], mblock->bitmaprow[comp]);\n\n            }\n            else\n            {\n                ncoeffs[comp] = 0;\n            }\n\n            /*  @todo Deblocking Semaphore for INTRA block, for inter just test for ringing  */\n#ifdef PV_POSTPROC_ON\n            if (video->postFilterType != PV_NO_POST_PROC)\n                *pp_mod[comp] = (uint8)((ncoeffs[comp] > 3) ? 
4 : 0);\n#endif\n        }\n\n        (*DC)[4] = mid_gray;\n        if (CBP & 2)\n        {\n            ncoeffs[4] = VlcDequantH263InterBlock(video, 4,\n                                                  mblock->bitmapcol[4], &mblock->bitmaprow[4]);\n            if (VLC_ERROR_DETECTED(ncoeffs[4]))\n                return PV_FAIL;\n\n            BlockIDCT(video->currVop->uChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 256, mblock->block[4], width >> 1, ncoeffs[4],\n                      mblock->bitmapcol[4], mblock->bitmaprow[4]);\n\n        }\n        else\n        {\n            ncoeffs[4] = 0;\n        }\n#ifdef PV_POSTPROC_ON\n        if (video->postFilterType != PV_NO_POST_PROC)\n            *pp_mod[4] = (uint8)((ncoeffs[4] > 3) ? 4 : 0);\n#endif\n        (*DC)[5] = mid_gray;\n        if (CBP & 1)\n        {\n            ncoeffs[5] = VlcDequantH263InterBlock(video, 5,\n                                                  mblock->bitmapcol[5], &mblock->bitmaprow[5]);\n            if (VLC_ERROR_DETECTED(ncoeffs[5]))\n                return PV_FAIL;\n\n            BlockIDCT(video->currVop->vChan + (offset >> 2) + (x_pos << 2), mblock->pred_block + 264, mblock->block[5], width >> 1, ncoeffs[5],\n                      mblock->bitmapcol[5], mblock->bitmaprow[5]);\n\n        }\n        else\n        {\n            ncoeffs[5] = 0;\n        }\n#ifdef PV_POSTPROC_ON\n        if (video->postFilterType != PV_NO_POST_PROC)\n            *pp_mod[5] = (uint8)((ncoeffs[5] > 3) ? 4 : 0);\n#endif\n\n\n\n\n        /* Motion compensation and put it to video->mblock->pred_block */\n    }\n    return PV_SUCCESS;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/dcac_prediction.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"zigzag.h\"\n#include \"scaling.h\"\n\nvoid    doDCACPrediction(\n    VideoDecData *video,\n    int comp,\n    int16 *q_block,\n    int *direction\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int i;\n    int mbnum = video->mbnum;\n    int nMBPerRow = video->nMBPerRow;\n    int x_pos = video->mbnum_col;\n    int y_pos = video->mbnum_row;\n    int16 *AC_tmp;\n    int QP_tmp;\n    int16 *QP_store = video->QPMB + mbnum;\n    int QP = video->QPMB[mbnum];\n    int QP_half = QP >> 1;\n    int32 val;\n    int flag_0 = FALSE, flag_1 = FALSE;\n    uint8 *slice_nb = video->sliceNo;\n    typeDCStore *DC_store = video->predDC + mbnum;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n\n    uint ACpred_flag = (uint) 
video->acPredFlag[mbnum];\n\n    int left_bnd, up_bnd;\n\n    static const int Xpos[6] = { -1, 0, -1, 0, -1, -1};\n    static const int Ypos[6] = { -1, -1, 0, 0, -1, -1};\n\n    static const int Xtab[6] = {1, 0, 3, 2, 4, 5};\n    static const int Ytab[6] = {2, 3, 0, 1, 4, 5};\n    static const int Ztab[6] = {3, 2, 1, 0, 4, 5};\n\n    /* I added these to speed up comparisons */\n    static const int Pos0[6] = { 1, 1, 0, 0, 1, 1};\n    static const int Pos1[6] = { 1, 0, 1, 0, 1, 1};\n\n    static const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};\n    static const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n//  int *direction;     /* 0: HORIZONTAL, 1: VERTICAL */\n    int block_A, block_B, block_C;\n    int DC_pred;\n    int y_offset, x_offset, x_tab, y_tab, z_tab;    /* speedup coefficients */\n    int b_xtab, b_ytab;\n\n    if (!comp && x_pos && !(video->headerInfo.Mode[mbnum-1]&INTRA_MASK)) /* not intra */\n    {\n        oscl_memset(DCAC_col, 0, sizeof(typeDCACStore));\n    }\n    if (!comp && y_pos && !(video->headerInfo.Mode[mbnum-nMBPerRow]&INTRA_MASK)) /* not intra */\n    {\n        oscl_memset(DCAC_row, 0, sizeof(typeDCACStore));\n    }\n\n    y_offset = Ypos[comp] * nMBPerRow;\n    x_offset = Xpos[comp];\n    x_tab = Xtab[comp];\n    y_tab = Ytab[comp];\n    z_tab = Ztab[comp];\n\n    b_xtab = B_Xtab[comp];\n    b_ytab = B_Ytab[comp];\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* Find the direction of prediction and the DC prediction */\n\n    if (x_pos == 0 && y_pos == 0)\n    {   /* top left corner */\n        block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray;\n        block_B = (comp == 3) ? DC_store[x_offset][z_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3) ? 
flag_1 = TRUE, DC_store[0][y_tab] : mid_gray;\n    }\n    else if (x_pos == 0)\n    {   /* left edge */\n        up_bnd   = Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow];\n\n        block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray;\n        block_B = ((comp == 1 && up_bnd) || comp == 3) ?  DC_store[y_offset+x_offset][z_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n    else if (y_pos == 0)\n    { /* top row */\n        left_bnd = Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1];\n\n        block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray;\n        block_B = ((comp == 2 && left_bnd) || comp == 3) ? DC_store[y_offset + x_offset][z_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n    else\n    {\n        up_bnd   = Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow];\n        left_bnd = Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1];\n\n        block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray;\n        block_B = (((comp == 0 || comp == 4 || comp == 5) && slice_nb[mbnum] == slice_nb[mbnum-1-nMBPerRow]) ||\n                   (comp == 1 && up_bnd) || (comp == 2 && left_bnd) || (comp == 3)) ? DC_store[y_offset+x_offset][z_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3 || up_bnd) ? 
flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n\n\n    if ((PV_ABS((block_A - block_B))) < (PV_ABS((block_B - block_C))))\n    {\n        DC_pred = block_C;\n        *direction = 1;\n        if (ACpred_flag == 1)\n        {\n            if (flag_1)\n            {\n                AC_tmp = DCAC_row[0][b_xtab];\n                QP_tmp = QP_store[y_offset];\n                if (QP_tmp == QP)\n                {\n                    for (i = 1; i < 8; i++)\n                    {\n                        q_block[i] = *AC_tmp++;\n                    }\n                }\n                else\n                {\n                    for (i = 1; i < 8; i++)\n                    {\n                        val = (int32)(*AC_tmp++) * QP_tmp;\n                        q_block[i] = (val < 0) ? (int16)((val - QP_half) / QP) : (int16)((val + QP_half) / QP);\n                        /* Vertical, top ROW of block C */\n                    }\n                }\n            }\n        }\n    }\n    else\n    {\n        DC_pred = block_A;\n        *direction = 0;\n        if (ACpred_flag == 1)\n        {\n            if (flag_0)\n            {\n                AC_tmp = DCAC_col[0][b_ytab];\n                QP_tmp = QP_store[x_offset];\n                if (QP_tmp == QP)\n                {\n                    for (i = 1; i < 8; i++)\n                    {\n                        q_block[i<<3] = *AC_tmp++;\n                    }\n                }\n                else\n                {\n                    for (i = 1; i < 8; i++)\n                    {\n                        val = (int32)(*AC_tmp++) * QP_tmp;\n                        q_block[i<<3] = (val < 0) ? (int16)((val - QP_half) / QP) : (int16)((val + QP_half) / QP);\n                        /* Vertical, top ROW of block C */\n                    }\n                }\n            }\n        }\n    }\n\n    /* Now predict the DC coefficient */\n    QP_tmp = (comp < 4) ? 
video->mblock->DCScalarLum : video->mblock->DCScalarChr;\n    q_block[0] += (int16)((DC_pred + (QP_tmp >> 1)) * scale[QP_tmp] >> 18);\n//      q_block[0] += (DC_pred+(QP_tmp>>1))/QP_tmp;\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#ifdef PV_ANNEX_IJKT_SUPPORT\nvoid    doDCACPrediction_I(\n    VideoDecData *video,\n    int comp,\n    int16 *q_block\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int mbnum = video->mbnum;\n    int nMBPerRow = video->nMBPerRow;\n    int x_pos = video->mbnum_col;\n    int y_pos = video->mbnum_row;\n    int16 *AC_tmp;\n    int flag_0 = FALSE, flag_1 = FALSE;\n    uint8 *slice_nb = video->sliceNo;\n    typeDCStore *DC_store = video->predDC + mbnum;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    int left_bnd, up_bnd;\n    uint8 *mode = video->headerInfo.Mode;\n    uint ACpred_flag = (uint) video->acPredFlag[mbnum];\n\n\n\n    static const int Xpos[6] = { -1, 0, -1, 0, -1, -1};\n    static const int Ypos[6] = { -1, -1, 0, 0, -1, -1};\n\n    static const int Xtab[6] = {1, 0, 3, 2, 4, 5};\n    static const int Ytab[6] = {2, 3, 0, 1, 4, 5};\n\n    /* I added these to speed up comparisons */\n    static const int Pos0[6] = { 1, 1, 0, 0, 1, 1};\n    static const int Pos1[6] = { 1, 0, 1, 0, 1, 1};\n\n    static const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};\n    static const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n//  int *direction;     /* 0: HORIZONTAL, 1: VERTICAL */\n    int block_A, block_C;\n    int y_offset, x_offset, x_tab, y_tab;   /* speedup coefficients */\n    int b_xtab, b_ytab;\n    y_offset = Ypos[comp] * nMBPerRow;\n    
x_offset = Xpos[comp];\n    x_tab = Xtab[comp];\n    y_tab = Ytab[comp];\n\n    b_xtab = B_Xtab[comp];\n    b_ytab = B_Ytab[comp];\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* Find the direction of prediction and the DC prediction */\n\n    if (x_pos == 0 && y_pos == 0)\n    {   /* top left corner */\n        block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[0][y_tab] : mid_gray;\n    }\n    else if (x_pos == 0)\n    {   /* left edge */\n        up_bnd   = (Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])\n                   && (mode[mbnum-nMBPerRow] == MODE_INTRA || mode[mbnum-nMBPerRow] == MODE_INTRA_Q);;\n\n        block_A = (comp == 1 || comp == 3) ? flag_0 = TRUE, DC_store[0][x_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n    else if (y_pos == 0)\n    { /* top row */\n        left_bnd = (Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1])\n                   && (mode[mbnum-1] == MODE_INTRA || mode[mbnum-1] == MODE_INTRA_Q);\n\n        block_A = (comp == 1 || comp == 3 || left_bnd) ? flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n    else\n    {\n        up_bnd   = (Pos0[comp] && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])\n                   && (mode[mbnum-nMBPerRow] == MODE_INTRA || mode[mbnum-nMBPerRow] == MODE_INTRA_Q);\n        left_bnd = (Pos1[comp] && slice_nb[mbnum] == slice_nb[mbnum-1])\n                   && (mode[mbnum-1] == MODE_INTRA || mode[mbnum-1] == MODE_INTRA_Q);\n\n        block_A = (comp == 1 || comp == 3 || left_bnd) ? 
flag_0 = TRUE, DC_store[x_offset][x_tab] : mid_gray;\n        block_C = (comp == 2 || comp == 3 || up_bnd) ? flag_1 = TRUE, DC_store[y_offset][y_tab] : mid_gray;\n    }\n\n    if (ACpred_flag == 0)\n    {\n        if (flag_0 == TRUE)\n        {\n            if (flag_1 == TRUE)\n            {\n                q_block[0] = (int16)((block_A + block_C) >> 1);\n            }\n            else\n            {\n                q_block[0] = (int16)block_A;\n            }\n        }\n        else\n        {\n            if (flag_1 == TRUE)\n            {\n                q_block[0] = (int16)block_C;\n            }\n            else\n            {\n                q_block[0] = mid_gray;\n            }\n        }\n\n    }\n    else\n    {\n        if (video->mblock->direction == 1)\n        {\n            if (flag_1 == TRUE)\n            {\n                q_block[0] = (int16)block_C;\n\n                AC_tmp = DCAC_row[0][b_xtab];\n                q_block[1] = AC_tmp[0];\n                q_block[2] = AC_tmp[1];\n                q_block[3] = AC_tmp[2];\n                q_block[4] = AC_tmp[3];\n                q_block[5] = AC_tmp[4];\n                q_block[6] = AC_tmp[5];\n                q_block[7] = AC_tmp[6];\n            }\n            else\n            {\n                q_block[0] = mid_gray;\n            }\n        }\n        else\n        {\n            if (flag_0 == TRUE)\n            {\n                q_block[0] = (int16)block_A;\n\n                AC_tmp = DCAC_col[0][b_ytab];\n                q_block[8] = AC_tmp[0];\n                q_block[16] = AC_tmp[1];\n                q_block[24] = AC_tmp[2];\n                q_block[32] = AC_tmp[3];\n                q_block[40] = AC_tmp[4];\n                q_block[48] = AC_tmp[5];\n                q_block[56] = AC_tmp[6];\n            }\n            else\n            {\n                q_block[0] = mid_gray;\n            }\n        }\n    }\n    
/*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/dec_pred_intra_dc.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"zigzag.h\"\n\nPV_STATUS PV_DecodePredictedIntraDC(\n    int compnum,\n    BitstreamDecVideo *stream,\n    int16 *INTRADC_delta)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    PV_STATUS status = PV_SUCCESS;\n    uint DC_size;\n    uint code;\n    int first_bit;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* read DC size 2 - 8 bits */\n    status = PV_VlcDecIntraDCPredSize(stream, compnum, &DC_size);\n\n    if (status == PV_SUCCESS)\n    {\n        if (DC_size == 0)\n        {\n            *INTRADC_delta = 0;\n        }\n        else\n        {\n            /* read delta DC 0 - 8 bits */\n            code = (int) BitstreamReadBits16_INLINE(stream, DC_size);\n\n            first_bit = code >> (DC_size - 1);\n\n            if (first_bit == 0)\n            {\n                /* negative delta 
INTRA DC */\n                *INTRADC_delta = code ^((1 << DC_size) - 1);\n                *INTRADC_delta = -(*INTRADC_delta);\n            }\n            else\n            { /* positive delta INTRA DC */\n                *INTRADC_delta = code;\n            }\n            if (DC_size > 8) BitstreamRead1Bits_INLINE(stream);\n        }\n    }\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return status;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/deringing_chroma.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n\n#ifdef PV_POSTPROC_ON\n\nvoid Deringing_Chroma(\n    uint8 *Rec_C,\n    int width,\n    int height,\n    int16 *QP_store,\n    int Combined,\n    uint8 *pp_mod\n)\n{\n    OSCL_UNUSED_ARG(Combined);\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int thres;\n    int v_blk, h_blk;\n    int max_diff;\n    int v_pel, h_pel;\n    int max_blk, min_blk;\n    int v0, h0;\n    uint8 *ptr;\n    int sum, sum1, incr;\n    int32 addr_v;\n    int sign_v[10], sum_v[10];\n    int *ptr2, *ptr3;\n    uint8 pelu, pelc, pell;\n    incr = width - BLKSIZE;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* chrominance */\n    /* Do the first line (7 pixels at a time => Don't use MMX)*/\n    for (h_blk = 0; h_blk < width; h_blk += BLKSIZE)\n    {\n        max_diff = (QP_store[h_blk>>3] >> 2) + 4;\n        ptr = &Rec_C[h_blk];\n        max_blk = min_blk = 
*ptr;\n        FindMaxMin(ptr, &min_blk, &max_blk, width);\n        h0 = ((h_blk - 1) >= 1) ? (h_blk - 1) : 1;\n\n        if (max_blk - min_blk >= 4)\n        {\n            thres = (max_blk + min_blk + 1) >> 1;\n\n\n            for (v_pel = 1; v_pel < BLKSIZE - 1; v_pel++)\n            {\n                addr_v = (int32)v_pel * width;\n                ptr = &Rec_C[addr_v + h0 - 1];\n                ptr2 = &sum_v[0];\n                ptr3 = &sign_v[0];\n\n                pelu = *(ptr - width);\n                pelc = *ptr;\n                pell = *(ptr + width);\n                ptr++;\n                *ptr2++ = pelu + (pelc << 1) + pell;\n                *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                pelu = *(ptr - width);\n                pelc = *ptr;\n                pell = *(ptr + width);\n                ptr++;\n                *ptr2++ = pelu + (pelc << 1) + pell;\n                *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                for (h_pel = h0; h_pel < h_blk + BLKSIZE - 1; h_pel++)\n                {\n                    pelu = *(ptr - width);\n                    pelc = *ptr;\n                    pell = *(ptr + width);\n\n                    *ptr2 = pelu + (pelc << 1) + pell;\n                    *ptr3 = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                    sum1 = *(ptr3 - 2) + *(ptr3 - 1) + *ptr3;\n                    if (sum1 == 0 || sum1 == 9)\n                    {\n                        sum = (*(ptr2 - 2) + (*(ptr2 - 1) << 1) + *ptr2 + 8) >> 4;\n\n                        ptr--;\n                        if (PV_ABS(*ptr - sum) > max_diff)\n                        {\n                            if (sum > *ptr)\n                                sum = *ptr + max_diff;\n                            else\n                                sum = *ptr - max_diff;\n                        }\n                        *ptr++ = (uint8) sum;\n             
       }\n                    ptr++;\n                    ptr2++;\n                    ptr3++;\n                }\n            }\n        }\n    }\n\n    for (v_blk = BLKSIZE; v_blk < height; v_blk += BLKSIZE)\n    {\n        v0 = v_blk - 1;\n        /* Do the first block (pixels=7 => No MMX) */\n        max_diff = (QP_store[((((int32)v_blk*width)>>3))>>3] >> 2) + 4;\n        ptr = &Rec_C[(int32)v_blk * width];\n        max_blk = min_blk = *ptr;\n        FindMaxMin(ptr, &min_blk, &max_blk, incr);\n\n        if (max_blk - min_blk >= 4)\n        {\n            thres = (max_blk + min_blk + 1) >> 1;\n\n            for (v_pel = v0; v_pel < v_blk + BLKSIZE - 1; v_pel++)\n            {\n                addr_v = v_pel * width;\n                ptr = &Rec_C[addr_v];\n                ptr2 = &sum_v[0];\n                ptr3 = &sign_v[0];\n\n                pelu = *(ptr - width);\n                pelc = *ptr;\n                pell = *(ptr + width);\n                ptr++;\n                *ptr2++ = pelu + (pelc << 1) + pell;\n                *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                pelu = *(ptr - width);\n                pelc = *ptr;\n                pell = *(ptr + width);\n                ptr++;\n                *ptr2++ = pelu + (pelc << 1) + pell;\n                *ptr3++ = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                for (h_pel = 1; h_pel < BLKSIZE - 1; h_pel++)\n                {\n                    pelu = *(ptr - width);\n                    pelc = *ptr;\n                    pell = *(ptr + width);\n\n                    *ptr2 = pelu + (pelc << 1) + pell;\n                    *ptr3 = INDEX(pelu, thres) + INDEX(pelc, thres) + INDEX(pell, thres);\n\n                    sum1 = *(ptr3 - 2) + *(ptr3 - 1) + *ptr3;\n                    if (sum1 == 0 || sum1 == 9)\n                    {\n                        sum = (*(ptr2 - 2) + (*(ptr2 - 1) << 1) + *ptr2 + 8) >> 4;\n\n                        
ptr--;\n                        if (PV_ABS(*ptr - sum) > max_diff)\n                        {\n                            if (sum > *ptr)\n                                sum = *ptr + max_diff;\n                            else\n                                sum = *ptr - max_diff;\n                        }\n                        *ptr++ = (uint8) sum;\n                    }\n                    ptr++;\n                    ptr2++;\n                    ptr3++;\n                }\n            }\n        }\n\n\n        /* Do the rest in MMX */\n        for (h_blk = BLKSIZE; h_blk < width; h_blk += BLKSIZE)\n        {\n            if ((pp_mod[(v_blk/8)*(width/8)+h_blk/8]&0x4) != 0)\n            {\n                max_diff = (QP_store[((((int32)v_blk*width)>>3)+h_blk)>>3] >> 2) + 4;\n                ptr = &Rec_C[(int32)v_blk * width + h_blk];\n                max_blk = min_blk = *ptr;\n                FindMaxMin(ptr, &min_blk, &max_blk, incr);\n                h0 = h_blk - 1;\n\n                if (max_blk - min_blk >= 4)\n                {\n                    thres = (max_blk + min_blk + 1) >> 1;\n#ifdef NoMMX\n                    AdaptiveSmooth_NoMMX(Rec_C, v0, h0, v_blk, h_blk, thres, width, max_diff);\n#else\n                    DeringAdaptiveSmoothMMX(&Rec_C[(int32)v0*width+h0], width, thres, max_diff);\n#endif\n                }\n            }\n        }\n    } /* macroblock level */\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/deringing_luma.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n\n#ifdef PV_POSTPROC_ON\n\nvoid Deringing_Luma(\n    uint8 *Rec_Y,\n    int width,\n    int height,\n    int16 *QP_store,\n    int Combined,\n    uint8 *pp_mod)\n{\n    OSCL_UNUSED_ARG(Combined);\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int thres[4], range[4], max_range_blk, max_thres_blk;\n    int MB_V, MB_H, BLK_V, BLK_H;\n    int v_blk, h_blk;\n    int max_diff;\n    int max_blk, min_blk;\n    int v0, h0;\n    uint8 *ptr;\n    int thr, blks, incr;\n    int mb_indx, blk_indx;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    incr = width - BLKSIZE;\n\n    /* Dering the first line of macro blocks */\n    for (MB_H = 0; MB_H < width; MB_H += MBSIZE)\n    {\n        max_diff = (QP_store[(MB_H)>>4] >> 2) + 4;\n\n        /* threshold determination */\n        max_range_blk = max_thres_blk = 0;\n        blks = 0;\n\n        for 
(BLK_V = 0; BLK_V < MBSIZE; BLK_V += BLKSIZE)\n        {\n            for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE)\n            {\n                ptr = &Rec_Y[(int32)(BLK_V) * width + MB_H + BLK_H];\n                FindMaxMin(ptr, &min_blk, &max_blk, incr);\n\n                thres[blks] = (max_blk + min_blk + 1) >> 1;\n                range[blks] = max_blk - min_blk;\n\n                if (range[blks] >= max_range_blk)\n                {\n                    max_range_blk = range[blks];\n                    max_thres_blk = thres[blks];\n                }\n                blks++;\n            }\n        }\n\n        blks = 0;\n        for (v_blk = 0; v_blk < MBSIZE; v_blk += BLKSIZE)\n        {\n            v0 = ((v_blk - 1) >= 1) ? (v_blk - 1) : 1;\n            for (h_blk = MB_H; h_blk < MB_H + MBSIZE; h_blk += BLKSIZE)\n            {\n                h0 = ((h_blk - 1) >= 1) ? (h_blk - 1) : 1;\n\n                /* threshold rearrangement for flat region adjacent to non-flat region */\n                if (range[blks]<32 && max_range_blk >= 64)\n                    thres[blks] = max_thres_blk;\n\n                /* threshold rearrangement for deblocking\n                (blockiness annoying at DC dominant region) */\n                if (max_range_blk >= 16)\n                {\n                    /* adaptive smoothing */\n                    thr = thres[blks];\n\n                    AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk,\n                                         thr, width, max_diff);\n                }\n                blks++;\n            } /* block level (Luminance) */\n        }\n    } /* macroblock level */\n\n\n    /* Do the rest of the macro-block-lines */\n    for (MB_V = MBSIZE; MB_V < height; MB_V += MBSIZE)\n    {\n        /* First macro-block */\n        max_diff = (QP_store[((((int32)MB_V*width)>>4))>>4] >> 2) + 4;\n        /* threshold determination */\n        max_range_blk = max_thres_blk = 0;\n        blks = 0;\n        for (BLK_V 
= 0; BLK_V < MBSIZE; BLK_V += BLKSIZE)\n        {\n            for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE)\n            {\n                ptr = &Rec_Y[(int32)(MB_V + BLK_V) * width + BLK_H];\n                FindMaxMin(ptr, &min_blk, &max_blk, incr);\n                thres[blks] = (max_blk + min_blk + 1) >> 1;\n                range[blks] = max_blk - min_blk;\n\n                if (range[blks] >= max_range_blk)\n                {\n                    max_range_blk = range[blks];\n                    max_thres_blk = thres[blks];\n                }\n                blks++;\n            }\n        }\n\n        blks = 0;\n        for (v_blk = MB_V; v_blk < MB_V + MBSIZE; v_blk += BLKSIZE)\n        {\n            v0 = v_blk - 1;\n            for (h_blk = 0; h_blk < MBSIZE; h_blk += BLKSIZE)\n            {\n                h0 = ((h_blk - 1) >= 1) ? (h_blk - 1) : 1;\n\n                /* threshold rearrangement for flat region adjacent to non-flat region */\n                if (range[blks]<32 && max_range_blk >= 64)\n                    thres[blks] = max_thres_blk;\n\n                /* threshold rearrangement for deblocking\n                (blockiness annoying at DC dominant region) */\n                if (max_range_blk >= 16)\n                {\n                    /* adaptive smoothing */\n                    thr = thres[blks];\n\n                    AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk,\n                                         thr, width, max_diff);\n                }\n                blks++;\n            }\n        } /* block level (Luminance) */\n\n        /* Rest of the macro-blocks */\n        for (MB_H = MBSIZE; MB_H < width; MB_H += MBSIZE)\n        {\n            max_diff = (QP_store[((((int32)MB_V*width)>>4)+MB_H)>>4] >> 2) + 4;\n\n            /* threshold determination */\n            max_range_blk = max_thres_blk = 0;\n            blks = 0;\n\n            mb_indx = (MB_V / 8) * (width / 8) + MB_H / 8;\n            for (BLK_V = 0; BLK_V < 
MBSIZE; BLK_V += BLKSIZE)\n            {\n                for (BLK_H = 0; BLK_H < MBSIZE; BLK_H += BLKSIZE)\n                {\n                    blk_indx = mb_indx + (BLK_V / 8) * width / 8 + BLK_H / 8;\n                    /* Update based on pp_mod only */\n                    if ((pp_mod[blk_indx]&0x4) != 0)\n                    {\n                        ptr = &Rec_Y[(int32)(MB_V + BLK_V) * width + MB_H + BLK_H];\n                        FindMaxMin(ptr, &min_blk, &max_blk, incr);\n                        thres[blks] = (max_blk + min_blk + 1) >> 1;\n                        range[blks] = max_blk - min_blk;\n\n                        if (range[blks] >= max_range_blk)\n                        {\n                            max_range_blk = range[blks];\n                            max_thres_blk = thres[blks];\n                        }\n                    }\n                    blks++;\n                }\n            }\n\n            blks = 0;\n            for (v_blk = MB_V; v_blk < MB_V + MBSIZE; v_blk += BLKSIZE)\n            {\n                v0 = v_blk - 1;\n                mb_indx = (v_blk / 8) * (width / 8);\n                for (h_blk = MB_H; h_blk < MB_H + MBSIZE; h_blk += BLKSIZE)\n                {\n                    h0 = h_blk - 1;\n                    blk_indx = mb_indx + h_blk / 8;\n                    if ((pp_mod[blk_indx]&0x4) != 0)\n                    {\n                        /* threshold rearrangement for flat region adjacent to non-flat region */\n                        if (range[blks]<32 && max_range_blk >= 64)\n                            thres[blks] = max_thres_blk;\n\n                        /* threshold rearrangement for deblocking\n                        (blockiness annoying at DC dominant region) */\n                        if (max_range_blk >= 16)\n                        {\n                            /* adaptive smoothing */\n                            thr = thres[blks];\n#ifdef NoMMX\n                            
AdaptiveSmooth_NoMMX(Rec_Y, v0, h0, v_blk, h_blk,\n                                                 thr, width, max_diff);\n#else\n                            DeringAdaptiveSmoothMMX(&Rec_Y[v0*width+h0],\n                                                    width, thr, max_diff);\n#endif\n                        }\n                    }\n                    blks++;\n                }\n            } /* block level (Luminance) */\n        } /* macroblock level */\n    } /* macroblock level */\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/find_min_max.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    input_ptr = pointer to the buffer containing values of type UChar\n            in a 2D block of data.\n    min_ptr = pointer to the minimum value of type Int to be found in a\n          square block of size BLKSIZE contained in 2D block of data.\n    max_ptr = pointer to the maximum value of type Int to be found in a\n          square block of size BLKSIZE contained in 2D block of data.\n    incr = value of type Int representing the width of 2D block of data.\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    None\n\n Pointers and Buffers Modified:\n    min_ptr points to the found minimum value in the square block of\n    size BLKSIZE contained in 2D block of data.\n\n    max_ptr points to the found maximum value in the square block of\n    size BLKSIZE contained in 2D block of data.\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This function finds the maximum 
and the minimum values in a square block of\n data of size BLKSIZE * BLKSIZE. The data is contained in the buffer which\n represents a 2D block of data that is larger than BLKSIZE * BLKSIZE.\n This is illustrated below.\n\n    mem loc x + 00h -> o o o o o o o o o o o o o o o o\n    mem loc x + 10h -> o o o o o X X X X X X X X o o o\n    mem loc x + 20h -> o o o o o X X X X X X X X o o o\n    mem loc x + 30h -> o o o o o X X X X X X X X o o o\n    mem loc x + 40h -> o o o o o X X X X X X X X o o o\n    mem loc x + 50h -> o o o o o X X X X X X X X o o o\n    mem loc x + 60h -> o o o o o X X X X X X X X o o o\n    mem loc x + 70h -> o o o o o X X X X X X X X o o o\n    mem loc x + 80h -> o o o o o X X X X X X X X o o o\n    mem loc x + 90h -> o o o o o o o o o o o o o o o o\n    mem loc x + A0h -> o o o o o o o o o o o o o o o o\n    mem loc x + B0h -> o o o o o o o o o o o o o o o o\n\nFor illustration purposes, the diagram assumes that BLKSIZE is equal to 8\nbut this is not a requirement. In this diagram, the buffer starts at\nlocation x but the input pointer, input_ptr, passed into this function\nwould be the first row of data to be searched which is at x + 15h. The\nvalue of incr passed onto this function represents the amount the input_ptr\nneeds to be incremented to point to the next row of data.\n\nThis function compares each value in a row to the current maximum and\nminimum. After each row, input_ptr is incremented to point to the next row.\nThis is repeated until all rows have been processed. 
When the search is\ncomplete the location pointed to by min_ptr contains the minimum value\nfound and the location pointed to by max_ptr contains the maximum value found.\n\n------------------------------------------------------------------------------\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n#include    \"post_proc.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined 
elsewhere\n----------------------------------------------------------------------------*/\n\n#ifdef PV_POSTPROC_ON\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nvoid  FindMaxMin(\n    uint8 *input_ptr,\n    int *min_ptr,\n    int *max_ptr,\n    int incr)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    register    uint    i, j;\n    register    int min, max;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    max = min = *input_ptr;\n    /*  incr = incr - BLKSIZE; */   /*  09/06/2001, already passed in as width - BLKSIZE */\n\n    for (i = BLKSIZE; i > 0; i--)\n    {\n        for (j = BLKSIZE; j > 0; j--)\n        {\n            if (*input_ptr > max)\n            {\n                max = *input_ptr;\n            }\n            else if (*input_ptr < min)\n            {\n                min = *input_ptr;\n            }\n            input_ptr += 1;\n        }\n\n        /* set pointer to the beginning of the next row*/\n        input_ptr += incr;\n    }\n\n    *max_ptr = max;\n    *min_ptr = min;\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/get_pred_adv_b_add.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    xpos = x half-pixel of (x,y) coordinates within a VOP; motion\n           compensated coordinates; native type\n    ypos = y half-pixel of (x,y) coordinates within a VOP; motion\n           compensated coordinates; native type\n    comp = pointer to 8-bit compensated prediction values within a VOP;\n        computed by this module (i/o); full-pel resolution\n    c_prev = pointer to previous 8-bit prediction values within a VOP;\n          values range from (0-255); full-pel resolution\n    sh_d = pointer to residual values used to compensate the predicted\n        value; values range from (-512 to 511); full-pel resolution\n    width = width of the VOP in pixels (x axis); full-pel resolution\n    rnd1 = rounding value for case when one dimension uses half-pel\n           resolution\n    rnd2 = rounding value for case when two dimensions uses half-pel\n           resolution\n    CBP = flag indicating whether residual is all zeros\n          (0 -> all zeros, 1 -> not all zeros)\n        outside_flag = flag indicating whether motion vector is outside the\n               
VOP (0 -> inside, 1 -> outside)\n\n Outputs:\n    returns 1\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Pointers and Buffers Modified:\n    comp = buffer contains newly computed compensated prediction values\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n Compute pixel values for a block in the current VOP. The prediction\n values are generated by averaging pixel values in the previous VOP; the\n block position in the previous frame is computed from the current block's\n motion vector. The computed pixel values are then computed by adding the\n prediction values to the block residual values.\n\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"motion_comp.h\"\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n\nint GetPredAdvancedBy0x0(\n    uint8 *prev,        /* i */\n    uint8 *pred_block,      /* i */\n    int width,      /* i */\n    int pred_width_rnd /* i */\n)\n{\n    uint    i;      /* loop variable */\n    int offset, offset2;\n    uint32  pred_word, word1, word2;\n    int tmp;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = width - B_SIZE; /* offset for prev */\n    offset2 = (pred_width_rnd >> 1) - 4; /* offset for pred_block */\n\n    tmp = (uint32)prev & 0x3;\n    pred_block -= offset2; /* preset */\n\n    if (tmp == 0)  /* word-aligned */\n    {\n        for (i = B_SIZE; i > 0; i--)\n        {\n            *((uint32*)(pred_block += offset2)) = *((uint32*)prev);\n            
*((uint32*)(pred_block += 4)) = *((uint32*)(prev + 4));\n            prev += width;\n        }\n        return 1;\n    }\n    else if (tmp == 1) /* first position */\n    {\n        prev--; /* word-aligned */\n\n        for (i = B_SIZE; i > 0; i--)\n        {\n            word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((uint32*)(prev += 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 8; /* 0 b4 b3 b2 */\n            pred_word = word1 | (word2 << 24);  /* b5 b4 b3 b2 */\n            *((uint32*)(pred_block += offset2)) = pred_word;\n\n            word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */\n            word2 >>= 8; /* 0 b8 b7 b6 */\n            pred_word = word2 | (word1 << 24); /* b9 b8 b7 b6 */\n            *((uint32*)(pred_block += 4)) = pred_word;\n\n            prev += offset;\n        }\n\n        return 1;\n    }\n    else if (tmp == 2) /* second position */\n    {\n        prev -= 2; /* word1-aligned */\n\n        for (i = B_SIZE; i > 0; i--)\n        {\n            word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((uint32*)(prev += 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 16; /* 0 0 b4 b3 */\n            pred_word = word1 | (word2 << 16);  /* b6 b5 b4 b3 */\n            *((uint32*)(pred_block += offset2)) = pred_word;\n\n            word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */\n            word2 >>= 16; /* 0 0 b8 b7 */\n            pred_word = word2 | (word1 << 16); /* b10 b9 b8 b7 */\n            *((uint32*)(pred_block += 4)) = pred_word;\n\n\n            prev += offset;\n        }\n\n        return 1;\n    }\n    else /* third position */\n    {\n        prev -= 3; /* word1-aligned */\n\n        for (i = B_SIZE; i > 0; i--)\n        {\n            word1 = *((uint32*)prev); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((uint32*)(prev += 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 24; /* 0 0 0 b4 */\n            
pred_word = word1 | (word2 << 8);   /* b7 b6 b5 b4 */\n            *((uint32*)(pred_block += offset2)) = pred_word;\n\n            word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */\n            word2 >>= 24; /* 0 0 0 b8 */\n            pred_word = word2 | (word1 << 8); /* b11 b10 b9 b8 */\n            *((uint32*)(pred_block += 4)) = pred_word;\n\n            prev += offset;\n        }\n\n        return 1;\n    }\n}\n\n/**************************************************************************/\nint GetPredAdvancedBy0x1(\n    uint8 *prev,        /* i */\n    uint8 *pred_block,      /* i */\n    int width,      /* i */\n    int pred_width_rnd /* i */\n)\n{\n    uint    i;      /* loop variable */\n    int offset, offset2;\n    uint32 word1, word2, word3, word12;\n    int tmp;\n    int rnd1;\n    uint32 mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = width - B_SIZE; /* offset for prev */\n    offset2 = (pred_width_rnd >> 1) - 4; /* offset of pred_block */\n\n    rnd1 = pred_width_rnd & 1;\n\n    /* Branch based on pixel location (half-pel or full-pel) for x and y */\n    pred_block -= offset2; /* preset */\n\n    tmp = (uint32)prev & 3;\n    mask = 254;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0xFEFEFEFE */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b4 b3 b2 b1 */\n                word2 = *((uint32*)(prev += 4)); /* b8 b7 b6 b5 */\n                word12 = (word1 >> 8); /* 0 b4 b3 b2 */\n                word12 |= (word2 << 24); /* b5 b4 b3 b2 */\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 
1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */\n                word12 = (word2 >> 8); /* 0 b8 b7 b6 */\n                word12 |= (word1 << 24); /* b9 b8 b7 b6 */\n                word3 = word2 | word12;\n                word2 &= mask;\n                word3 &= (~mask);  /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 == 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b4 b3 b2 b1 */\n\n                word2 = *((uint32*)(prev += 4)); /* b8 b7 b6 b5 */\n                word12 = (word1 >> 8); /* 0 b4 b3 b2 */\n                word12 |= (word2 << 24); /* b5 b4 b3 b2 */\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b12 b11 b10 b9 */\n                word12 = (word2 >> 8); /* 0 b8 b7 b6 */\n                word12 |= (word1 << 24); /* b9 b8 b7 b6 */\n                word3 = word2 & word12;\n                word2 &= mask;\n                word3 &= (~mask);  /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n           
     *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        } /* rnd1 */\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b3 b2 b1 b0 */\n                word2 = *((uint32*)(prev += 4)); /* b7 b6 b5 b4 */\n                word12 = (word1 >> 8); /* 0 b3 b2 b1 */\n                word1 >>= 16; /* 0 0 b3 b2 */\n                word12 |= (word2 << 24); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 16); /* b5 b4 b3 b2 */\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b11 b10 b9 b8 */\n                word12 = (word2 >> 8); /* 0 b7 b6 b5 */\n                word2 >>= 16; /* 0 0 b7 b6 */\n                word12 |= (word1 << 24); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 16); /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word2&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = 
*((uint32*)prev); /* b3 b2 b1 b0 */\n\n                word2 = *((uint32*)(prev += 4)); /* b7 b6 b5 b4 */\n                word12 = (word1 >> 8); /* 0 b3 b2 b1 */\n                word1 >>= 16; /* 0 0 b3 b2 */\n                word12 |= (word2 << 24); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 16); /* b5 b4 b3 b2 */\n                word3 = word1 & word12;\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b11 b10 b9 b8 */\n                word12 = (word2 >> 8); /* 0 b7 b6 b5 */\n                word2 >>= 16; /* 0 0 b7 b6 */\n                word12 |= (word1 << 24); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 16); /* b9 b8 b7 b6 */\n                word3 = word2 & word12;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        } /* rnd1 */\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b2 b1 b0 bN1 */\n                word2 = *((uint32*)(prev += 4)); /* b6 b5 b4 b3 */\n                word12 = (word1 >> 16); /* 0 0 b2 b1 */\n                word1 >>= 24; /* 0 0 0 b2 */\n                word12 |= (word2 << 16); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 8); /* b5 b4 b3 b2 */\n                word3 = word1 | 
word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b10 b9 b8 b7 */\n                word12 = (word2 >> 16); /* 0 0 b6 b5 */\n                word2 >>= 24; /* 0 0 0 b6 */\n                word12 |= (word1 << 16); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 8); /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 == 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b2 b1 b0 bN1 */\n                word2 = *((uint32*)(prev += 4)); /* b6 b5 b4 b3 */\n                word12 = (word1 >> 16); /* 0 0 b2 b1 */\n                word1 >>= 24; /* 0 0 0 b2 */\n                word12 |= (word2 << 16); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 8); /* b5 b4 b3 b2 */\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* 
write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b10 b9 b8 b7 */\n                word12 = (word2 >> 16); /* 0 0 b6 b5 */\n                word2 >>= 24; /* 0 0 0 b6 */\n                word12 |= (word1 << 16); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 8); /* b9 b8 b7 b6 */\n                word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else /* tmp = 3 */\n    {\n        prev -= 3; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b1 b0 bN1 bN2 */\n                word2 = *((uint32*)(prev += 4)); /* b5 b4 b3 b2 */\n                word12 = (word1 >> 24); /* 0 0 0 b1 */\n                word12 |= (word2 << 8); /* b4 b3 b2 b1 */\n                word1 = word2;\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b9 b8 b7 b6 */\n                word12 = (word2 >> 24); /* 0 0 0 b5 */\n                word12 |= (word1 << 8); /* b8 b7 b6 b5 */\n                word2 = word1; /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 
&= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n        else\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)prev); /* b1 b0 bN1 bN2 */\n                word2 = *((uint32*)(prev += 4)); /* b5 b4 b3 b2 */\n                word12 = (word1 >> 24); /* 0 0 0 b1 */\n                word12 |= (word2 << 8); /* b4 b3 b2 b1 */\n                word1 = word2;\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1; /* write 4 pixels */\n\n                word1 = *((uint32*)(prev += 4)); /* b9 b8 b7 b6 */\n                word12 = (word2 >> 24); /* 0 0 0 b5 */\n                word12 |= (word1 << 8); /* b8 b7 b6 b5 */\n                word2 = word1; /* b9 b8 b7 b6 */\n                word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((uint32*)(pred_block += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n}\n\n/**************************************************************************/\nint GetPredAdvancedBy1x0(\n    uint8 
*prev,        /* i */\n    uint8 *pred_block,      /* i */\n    int width,      /* i */\n    int pred_width_rnd /* i */\n)\n{\n    uint    i;      /* loop variable */\n    int offset, offset2;\n    uint32  word1, word2, word3, word12, word22;\n    int tmp;\n    int rnd1;\n    uint32 mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = width - B_SIZE; /* offset for prev */\n    offset2 = (pred_width_rnd >> 1) - 4; /* offset for pred_block */\n\n    rnd1 = pred_width_rnd & 1;\n\n    /* Branch based on pixel location (half-pel or full-pel) for x and y */\n    pred_block -= offset2; /* preset */\n\n    tmp = (uint32)prev & 3;\n    mask = 254;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0xFEFEFEFE */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        prev -= 4;\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)(prev += 4));\n                word2 = *((uint32*)(prev + width));\n                word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1;\n                word1 = *((uint32*)(prev += 4));\n                word2 = *((uint32*)(prev + width));\n                word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n\n                prev += offset;\n            }\n            return 1;\n   
     }\n        else   /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((uint32*)(prev += 4));\n                word2 = *((uint32*)(prev + width));\n                word3 = word1 & word2;  /* rnd1 = 0; */\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += offset2)) = word1;\n                word1 = *((uint32*)(prev += 4));\n                word2 = *((uint32*)(prev + width));\n                word3 = word1 & word2;  /* rnd1 = 0; */\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 8; /* 0 b4 b3 b2 */\n                word22 >>= 8;\n                word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */\n                word22 = word22 | (word2 << 24);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                
word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 8; /* 0 b8 b7 b6 */\n                word2 >>= 8;\n                word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */\n                word2 = word2 | (word22 << 24);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 8; /* 0 b4 b3 b2 */\n                word22 >>= 8;\n                word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */\n                word22 = word22 | (word2 << 24);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 8; /* 0 b8 b7 b6 */\n                word2 >>= 8;\n                word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */\n 
               word2 = word2 | (word22 << 24);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 16; /* 0 0 b4 b3 */\n                word22 >>= 16;\n                word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */\n                word22 = word22 | (word2 << 16);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 16; /* 0 0 b8 b7 */\n                word2 >>= 16;\n                word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */\n                word2 = word2 | (word22 << 16);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + 
(word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 16; /* 0 0 b4 b3 */\n                word22 >>= 16;\n                word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */\n                word22 = word22 | (word2 << 16);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 16; /* 0 0 b8 b7 */\n                word2 >>= 16;\n                word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */\n                word2 = word2 | (word22 << 16);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n\n            return 1;\n        }\n    }\n    else /* tmp == 3 */\n    {\n        prev -= 3; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            
{\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 24; /* 0 0 0 b4 */\n                word22 >>= 24;\n                word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */\n                word22 = word22 | (word2 << 8);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 24; /* 0 0 0 b8 */\n                word2 >>= 24;\n                word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */\n                word2 = word2 | (word22 << 8);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((uint32*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((uint32*)(prev + width));\n\n                word1 = *((uint32*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((uint32*)(prev + width));\n                word12 >>= 24; /* 0 0 0 b4 */\n                word22 >>= 24;\n                word12 = 
word12 | (word1 << 8); /* b7 b6 b5 b4 */\n                word22 = word22 | (word2 << 8);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((uint32*)(pred_block += offset2)) = word12;\n\n                word12 = *((uint32*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((uint32*)(prev + width));\n                word1 >>= 24; /* 0 0 0 b8 */\n                word2 >>= 24;\n                word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */\n                word2 = word2 | (word22 << 8);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((uint32*)(pred_block += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        } /* rnd */\n    } /* tmp */\n}\n\n/**********************************************************************************/\nint GetPredAdvancedBy1x1(\n    uint8 *prev,        /* i */\n    uint8 *pred_block,      /* i */\n    int width,      /* i */\n    int pred_width_rnd /* i */\n)\n{\n    uint    i;      /* loop variable */\n    int offset, offset2;\n    uint32  x1, x2, x1m, x2m, y1, y2, y1m, y2m; /* new way */\n    int tmp;\n    int rnd1, rnd2;\n    uint32 mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = width - B_SIZE; /* offset for prev */\n    offset2 = (pred_width_rnd >> 1) - 8; /* offset for pred_block */\n\n    rnd1 = pred_width_rnd & 1;\n\n    rnd2 = rnd1 + 1;\n    rnd2 |= (rnd2 << 8);\n    rnd2 |= (rnd2 << 16);\n\n   
 mask = 0x3F;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0x3f3f3f3f */\n\n    tmp = (uint32)prev & 3;\n\n    pred_block -= 4; /* preset */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */\n            y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            y2m = x1m >> 8;\n            y2 = x1 >> 8;\n            y2m |= (y1m << 24);  /* a4+b4, a3+b3, a2+b2, a1+b1 */\n            y2 |= (y1 << 24);\n            x1m += y2m;  /* a3+b3+a4+b4, ....., a0+b0+a1+b1 */\n            x1 += y2;\n            x1 += rnd2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            
*((uint32*)(pred_block += 4)) = x1m; /* save x1m */\n\n            y2m = y1m >> 8;\n            y2 = y1 >> 8;\n            y2m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */\n            y2 |= (x2 << 24);\n            y1m += y2m;  /* a7+b7+a8+b8, ....., a4+b4+a5+b5 */\n            y1 += y2;\n            y1 += rnd2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((uint32*)(pred_block += 4)) = y1m; /* save y1m */\n\n            pred_block += offset2;\n            prev += offset;\n        }\n\n        return 1;\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */\n            y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n       
     /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 8 ;\n            x1 >>= 8;\n            x1m |= (y1m << 24);  /* a4+b4, a3+b3, a2+b2, a1+b1 */\n            x1 |= (y1 << 24);\n            y2m = (y1m << 16);\n            y2 = (y1 << 16);\n            y2m |= (x1m >> 8); /* a5+b5, a4+b4, a3+b3, a2+b2 */\n            y2 |= (x1 >> 8);\n            x1 += rnd2;\n            x1m += y2m;  /* a4+b4+a5+b5, ....., a1+b1+a2+b2 */\n            x1 += y2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((uint32*)(pred_block += 4)) = x1m; /* save x1m */\n\n            y1m >>= 8;\n            y1 >>= 8;\n            y1m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */\n            y1 |= (x2 << 24);\n            y2m = (x2m << 16);\n            y2 = (x2 << 16);\n            y2m |= (y1m >> 8); /*  a9+b9, a8+b8, a7+b7, a6+b6,*/\n            y2 |= (y1 >> 8);\n            y1 += rnd2;\n            y1m += y2m;  /* a8+b8+a9+b9, ....., a5+b5+a6+b6 */\n            y1 += y2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((uint32*)(pred_block += 4)) = y1m; /* save y1m */\n\n            pred_block += offset2;\n            prev += offset;\n        }\n        return 1;\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */\n            y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 
>> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 16 ;\n            x1 >>= 16;\n            x1m |= (y1m << 16);  /* a5+b5, a4+b4, a3+b3, a2+b2 */\n            x1 |= (y1 << 16);\n            y2m = (y1m << 8);\n            y2 = (y1 << 8);\n            y2m |= (x1m >> 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */\n            y2 |= (x1 >> 8);\n            x1 += rnd2;\n            x1m += y2m;  /* a5+b5+a6+b6, ....., a2+b2+a3+b3 */\n            x1 += y2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((uint32*)(pred_block += 4)) = x1m; /* save x1m */\n\n            y1m >>= 16;\n            y1 >>= 16;\n            y1m |= (x2m << 16); /* a9+b9, a8+b8, a7+b7, a6+b6 */\n            y1 |= (x2 << 16);\n            y2m = (x2m << 8);\n            y2 = (x2 << 8);\n            y2m |= (y1m >> 8); /*  a10+b10, a9+b9, a8+b8, a7+b7,*/\n            y2 |= (y1 >> 8);\n            y1 += rnd2;\n            y1m += y2m;  /* a9+b9+a10+b10, ....., a6+b6+a7+b7 */\n            y1 += y2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((uint32*)(pred_block += 4)) = y1m; /* save y1m */\n\n            pred_block += offset2;\n            prev += offset;\n        }\n        return 1;\n    }\n   
 else /* tmp == 3 */\n    {\n        prev -= 3; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((uint32*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((uint32*)(prev + width)); /* load b3 b2 b1 b0, another line */\n            y1 = *((uint32*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((uint32*)(prev + width)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((uint32*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((uint32*)(prev + width)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 24 ;\n            x1 >>= 24;\n            x1m |= (y1m << 8);  /* a6+b6, a5+b5, a4+b4, a3+b3 */\n            x1 |= (y1 << 8);\n\n            x1m += y1m;  /* a6+b6+a7+b7, ....., a3+b3+a4+b4 */\n            x1 += y1;\n            x1 += rnd2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((uint32*)(pred_block += 4)) = x1m; /* save x1m */\n\n            y1m >>= 24;\n            y1 >>= 24;\n            y1m |= (x2m << 8); 
/* a10+b10, a9+b9, a8+b8, a7+b7 */\n            y1 |= (x2 << 8);\n            y1m += x2m;  /* a10+b10+a11+b11, ....., a7+b7+a8+b8 */\n            y1 += x2;\n            y1 += rnd2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((uint32*)(pred_block += 4)) = y1m; /* save y1m */\n\n            pred_block += offset2;\n            prev += offset;\n        }\n        return 1;\n    }\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/get_pred_outside.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    xpos = x half-pixel of (x,y) coordinates within a VOP; motion\n           compensated coordinates; native data type\n    ypos = y half-pixel of (x,y) coordinates within a VOP; motion\n           compensated coordinates; native data type\n    comp = pointer to 8-bit compensated prediction values within a VOP;\n           computed by this module (i/o); full-pel resolution; 8-bit data\n    c_prev = pointer to previous 8-bit prediction values within a VOP;\n         values range from (0-255); full-pel resolution; 8-bit data\n    sh_d = pointer to residual values used to compensate the predicted\n           value; values range from (-512 to 511); full-pel resolution;\n           native data type\n    width = width of the VOP in pixels (x axis); full-pel resolution;\n        native data type\n    height = height of the VOP in pixels (y axis); full-pel resolution;\n         native data type\n    rnd1 = rounding value for case when one dimension uses half-pel\n           resolution; native data type\n    rnd2 = rounding value for case when two dimensions uses half-pel\n        
   resolution; native data type\n\n Outputs:\n    returns 1\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Pointers and Buffers Modified:\n    comp = buffer contains newly computed compensated prediction values\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n Summary:\n\n This function performs motion compensated prediction for the case where\n the motion vector points to a block outside the VOP. The function interpolates\n the pixels that are outside the VOP using the boundary pixels for the block.\n Once the values are interpolated, the pixel values are computed for a block\n in the current VOP. The prediction values are generated by averaging pixel\n values in the previous VOP; the block position in the previous frame is\n computed from the current block's motion vector. The computed pixel values\n are calculated by adding the prediction values to the block residual values.\n\n Details:\n\n First, this function determines which VOP boundary(ies) the motion vector\n is outside, i.e., left, right, top, bottom. xpos is compared to the left and\n right boundaries; ypos is compared to the top and bottom boundaries. The number\n of block pixels inside the boundary in the x and y directions are stored\n in endx and endy, respectively. If the entire block is inside the x or y\n boundary, the respective end is set to 0.\n\n After the boundaries are tested, any pixels lying outside a boundary are\n interpolated from the boundary pixels. For example, if the block is outside the\n bottom boundary, boundary pixels along the bottom of the VOP are used to\n interpolate those pixels lying outside the bottom boundary. The interpolation\n used is a simple column-wise or row-wise copy of the boundary pixels (inside the\n block) depending on which boundary the block is outside. 
In our example, each\n boundary pixel would be copied column-wise to the pixel beneath it. If the\n block was outside right boundary, the boundary pixels would be copied row-wise\n to the pixel to the right of it. If the block was outside both an x and y\n boundary, the boundary pixels would be copied row-wise for the portion of the\n block outside the x boundary, and column-wise for the portion of the block\n outside the y boundary. And so on.\n\n Once the pixel interpolation is complete, the motion compensated output values\n (comp[]) are calculed from the motion compensated prediction (pred[])values and\n the residual values (sh_d[]) of the current frame. The prediction values are\n generated by averaging pixel values in the previous VOP; the block position in\n the previous frame is computed from the current block's motion vector. The\n computed pixel values are calculated by adding the prediction values to the\n block residual values.\n\n*/\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"motion_comp.h\"\n\n#define PAD_CORNER {    temp = *prev; \\\n            temp |= (temp<<8);  \\\n            temp |= (temp<<16); \\\n            *((uint32*)ptr) = temp; \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n            *((uint32*)(ptr+4)) = temp;  \\\n            *((uint32*)(ptr+=16)) = temp;  \\\n          
  *((uint32*)(ptr+4)) = temp;  }\n\n#define PAD_ROW  {  temp = *((uint32*)prev); \\\n                    temp2 = *((uint32*)(prev+4)); \\\n            *((uint32*)ptr) =  temp;\\\n            *((uint32*)(ptr+4)) =  temp2; \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;\\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp2;}\n\n#define PAD_EXTRA_4x8           {   temp = *((uint32*)(prev+8)); \\\n                *((uint32*)ptr) =  temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; \\\n                *((uint32*)(ptr+=16)) = temp; }\n\n#define PAD_COL { temp = *prev; \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)ptr) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            
*((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp; \\\n            temp = *(prev+=16); \\\n            temp|=(temp<<8);  temp|=(temp<<16); \\\n            *((uint32*)(ptr+=16)) = temp; \\\n            *((uint32*)(ptr+4)) = temp;}\n\n/* copy 8x8 block */\n#define COPY_BLOCK  {           *((uint32*)ptr) = *((uint32*)prev); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4));  }\n\n#define COPY_12x8       {       *((uint32*)ptr) = *((uint32*)prev); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            
*((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); \\\n            *((uint32*)(ptr+=16)) = *((uint32*)(prev+=width)); \\\n            *((uint32*)(ptr+4)) = *((uint32*)(prev+4)); \\\n            *((uint32*)(ptr+8)) = *((uint32*)(prev+8)); }\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nint GetPredOutside(\n    int xpos,       /* i */\n    int ypos,       /* i */\n    uint8 *c_prev,      /* i */\n    uint8 *pred_block,      /* i */\n    int width,      /* i */\n    int height,     /* i */\n    int rnd1,       /* i */\n    int pred_width\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    uint8   *prev;      /* pointers to adjacent pixels in 
the    */\n    uint8   pred[256];  /* storage for padded pixel values, 16x16 */\n    uint8   *ptr;\n    int xoffset;\n    uint32 temp, temp2;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* saturate xpos and ypos */\n    if (xpos < -16) xpos = -16;\n    if (xpos > ((width - 1) << 1)) xpos = (width - 1) << 1;\n    if (ypos < -16) ypos = -16;\n    if (ypos > ((height - 1) << 1)) ypos = (height - 1) << 1;\n\n    if (xpos < 0)\n    {\n        if (ypos < 0) /* pad top left of frame */\n        {\n            /* copy the block */\n            ptr = pred + (8 << 4) + 8;\n            prev = c_prev;\n            COPY_BLOCK\n\n            /* pad the corner */\n            ptr = pred;\n            prev = pred + (8 << 4) + 8;\n            PAD_CORNER\n\n            /* pad top */\n            ptr = pred + 8;\n            prev = pred + (8 << 4) + 8;\n            PAD_ROW\n\n            /* pad left */\n            ptr = pred + (8 << 4);\n            prev = pred + (8 << 4) + 8;\n            PAD_COL\n\n\n            ptr = pred + (((ypos >> 1) + 8) << 4) + (xpos >> 1) + 8;\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n        else if ((ypos >> 1) < (height - B_SIZE)) /* pad left of frame */\n        {\n            /* copy block */\n            ptr = pred + 8;\n            prev = c_prev + (ypos >> 1) * width;\n            COPY_BLOCK\n            /* copy extra line */\n            *((uint32*)(ptr += 16)) = *((uint32*)(prev += width));\n            *((uint32*)(ptr + 4)) = *((uint32*)(prev + 4));\n\n            /* pad left */\n            ptr = pred;\n            prev = pred + 8;\n            PAD_COL\n            /* pad extra line */\n            temp = *(prev += 16);\n            temp |= (temp << 8);\n            temp |= (temp << 16);\n            
*((uint32*)(ptr += 16)) = temp;\n            *((uint32*)(ptr + 4)) = temp;\n\n            ptr = pred + 8 + (xpos >> 1);\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n        else /* pad bottom left of frame */\n        {\n            /* copy the block */\n            ptr = pred + 8; /* point to the center */\n            prev = c_prev + width * (height - 8);\n            COPY_BLOCK\n\n            /* pad the corner */\n            ptr = pred + (8 << 4);\n            prev = ptr - 8;\n            PAD_CORNER\n\n            /* pad bottom */\n            ptr = pred + (8 << 4) + 8;\n            prev = ptr - 16;\n            PAD_ROW\n\n            /* pad left */\n            ptr = pred ;\n            prev = ptr + 8;\n            PAD_COL\n\n            ptr = pred + 8 + (((ypos >> 1) - (height - 8)) << 4) + (xpos >> 1);\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n    }\n    else if ((xpos >> 1) < (width - B_SIZE))\n    {\n        if (ypos < 0) /* pad top of frame */\n        {\n            xoffset = xpos >> 1;\n            xoffset = xoffset & 0x3; /* word align ptr */\n\n            /* copy block */\n            ptr = pred + (8 << 4);\n            prev = c_prev + (xpos >> 1) - xoffset;\n\n            if (xoffset || (xpos&1)) /* copy extra 4x8 */\n            {\n                COPY_12x8\n            }\n            else\n            {\n                COPY_BLOCK\n            }\n\n            /* pad top */\n            ptr = pred;\n            prev = pred + (8 << 4);\n            PAD_ROW\n            if (xoffset || (xpos&1)) /* pad extra 4x8 */\n            {\n                ptr = pred + 8;\n                PAD_EXTRA_4x8\n            }\n\n            ptr = pred + (((ypos >> 1) + 8) << 4) + xoffset;\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n        
    return 1;\n        }\n        else /* pad bottom of frame */\n        {\n            xoffset = xpos >> 1;\n            xoffset = xoffset & 0x3; /* word align ptr */\n            /* copy block */\n            ptr = pred ;\n            prev = c_prev + width * (height - 8) + (xpos >> 1) - xoffset;\n            if (xoffset  || (xpos&1))\n            {\n                COPY_12x8\n            }\n            else\n            {\n                COPY_BLOCK\n            }\n\n            /* pad bottom */\n            ptr = pred + (8 << 4);\n            prev = ptr - 16;\n            PAD_ROW\n            if (xoffset || (xpos&1))\n            {\n                ptr = pred + (8 << 4) + 8;\n                PAD_EXTRA_4x8\n            }\n\n            ptr = pred + (((ypos >> 1) - (height - 8)) << 4) + xoffset;\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n    }\n    else\n    {\n        if (ypos < 0) /* pad top right of frame */\n        {\n            /* copy block */\n            ptr = pred + (8 << 4);\n            prev = c_prev + width - 8;\n            COPY_BLOCK\n\n            /* pad top-right */\n            ptr = pred + 8;\n            prev = pred + (8 << 4) + 7;\n            PAD_CORNER\n\n            /* pad top */\n            ptr = pred ;\n            prev = pred + (8 << 4);\n            PAD_ROW;\n\n            /* pad right */\n            ptr = pred + (8 << 4) + 8;\n            prev = ptr - 1;\n            PAD_COL;\n\n            ptr = pred + ((8 + (ypos >> 1)) << 4) + (8 - (width - (xpos >> 1)));\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n        else if ((ypos >> 1) < (height - B_SIZE)) /* pad right of frame */\n        {\n            /* copy block */\n            ptr = pred;\n            prev = c_prev + (ypos >> 1) * width + width - 8;\n            COPY_BLOCK\n            /* copy extra line 
*/\n            *((uint32*)(ptr += 16)) = *((uint32*)(prev += width));\n            *((uint32*)(ptr + 4)) = *((uint32*)(prev + 4));\n\n            /* pad right */\n            ptr = pred + 8;\n            prev = ptr - 1;\n            PAD_COL;\n            /* pad extra line */\n            temp = *(prev += 16);\n            temp |= (temp << 8);\n            temp |= (temp << 16);\n            *((uint32*)(ptr += 16)) = temp;\n            *((uint32*)(ptr + 4)) = temp;\n\n\n            ptr = pred + 8 - (width - (xpos >> 1));\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n\n        }\n        else /* pad bottom right of frame */\n        {\n            /* copy block */\n            ptr = pred;\n            prev = c_prev + width * (height - 8) + width - 8;\n            COPY_BLOCK\n\n            /* pad bottom-right */\n            ptr = pred + (8 << 4) + 8;\n            prev = ptr - 17;\n            PAD_CORNER\n\n            /* pad right */\n            ptr = pred + 8;\n            prev = ptr - 1;\n            PAD_COL\n\n            /* pad bottom */\n            ptr = pred + (8 << 4);\n            prev = ptr - 16;\n            PAD_ROW\n\n            ptr = pred + 8 - (width - (xpos >> 1)) + ((8 - (height - (ypos >> 1))) << 4);\n\n            GetPredAdvBTable[ypos&1][xpos&1](ptr, pred_block, 16, (pred_width << 1) | rnd1);\n\n            return 1;\n        }\n    }\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/idct.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n MODULE DESCRIPTION\n\n This file contains the functions that transform an 8x8 image block from\n dequantized DCT coefficients to spatial domain pixel values by calculating\n inverse discrete cosine transform (IDCT).\n\n------------------------------------------------------------------------------\n*/\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"idct.h\"\n#include \"motion_comp.h\"\n#ifndef FAST_IDCT\n\n/*\n------------------------------------------------------------------------------\n FUNCTION NAME: idct\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS FOR idct\n\n Inputs:\n    blk = pointer to the buffer containing the dequantized DCT\n          coefficients of type int for an 8x8 image block;\n          values range from (-2048, 2047) which defined as standard.\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    
None\n\n Pointers and Buffers Modified:\n    blk points to the found IDCT values for an 8x8 image block.\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION FOR idct\n\n This function transforms an 8x8 image block from dequantized DCT coefficients\n (F(u,v)) to spatial domain pixel values (f(x,y)) by performing the two\n dimensional inverse discrete cosine transform (IDCT).\n\n         _7_ _7_      C(u) C(v)\n    f(x,y) = \\   \\  F(u,v)---- ----cos[(2x+1)*u*pi/16]cos[(2y+1)*v*pi/16]\n         /__ /__    2    2\n         u=0 v=0\n\n    where   C(i) = 1/sqrt(2)    if i=0\n        C(i) = 1        otherwise\n\n 2-D IDCT can be separated as horizontal(row-wise) and vertical(column-wise)\n 1-D IDCTs. Therefore, 2-D IDCT values are found by the following two steps:\n 1. Find horizontal 1-D IDCT values for each row from 8x8 dequantized DCT\n    coefficients by row IDCT operation.\n\n          _7_        C(u)\n    g(x,v) =  \\   F(u,v) ---- cos[(2x+1)*u*pi/16]\n          /__         2\n          u=0\n\n 2. Find vertical 1-D IDCT values for each column from the results of 1\n    by column IDCT operation.\n\n              _7_        C(v)\n    f(x,y) =  \\   g(x,v) ---- cos[(2y+1)*v*pi/16]\n          /__         2\n          v=0\n\n------------------------------------------------------------------------------\n REQUIREMENTS FOR idct\n\n None\n\n------------------------------------------------------------------------------\n*/\n/*  REFERENCES FOR idct */\n/* idct.c, inverse fast discrete cosine transform\n inverse two dimensional DCT, Chen-Wang algorithm\n (cf. IEEE ASSP-32, pp. 803-816, Aug. 
1984)\n 32-bit integer arithmetic (8 bit coefficients)\n 11 mults, 29 adds per DCT\n sE, 18.8.91\n\n coefficients ertended to 12 bit for IEEE1180-1990\n compliance                           sE,  2.1.94\n*/\n\n\n/*----------------------------------------------------------------------------\n; Function Code FOR idct\n----------------------------------------------------------------------------*/\nvoid idct_intra(\n    int *blk, uint8 *comp, int width\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int i;\n    int32   tmpBLK[64];\n    int32   *tmpBLK32 = &tmpBLK[0];\n    int32   r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */\n    int32   a;\n    int offset = width - 8;\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* two dimensional inverse discrete cosine transform */\n\n\n    /* column (vertical) IDCT */\n    for (i = B_SIZE - 1; i >= 0; i--)\n    {\n        /* initialize butterfly nodes at first stage */\n\n        r1 = blk[B_SIZE * 4 + i] << 11;\n        /* since row IDCT results have net left shift by 3 */\n        /* this left shift by 8 gives net left shift by 11 */\n        /* in order to maintain the same scale as that of  */\n        /* coefficients Wi */\n\n        r2 = blk[B_SIZE * 6 + i];\n        r3 = blk[B_SIZE * 2 + i];\n        r4 = blk[B_SIZE * 1 + i];\n        r5 = blk[B_SIZE * 7 + i];\n        r6 = blk[B_SIZE * 5 + i];\n        r7 = blk[B_SIZE * 3 + i];\n\n        if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))\n        {\n            /* shortcut */\n            /* execute if values of g(r,1) to g(r,7) in a column*/\n            /* are all zeros */\n\n            /* make output of IDCT >>3 or scaled by 1/8 and */\n            /* with 
the proper rounding */\n            a = (blk[B_SIZE * 0 + i]) << 3;\n            tmpBLK32[B_SIZE * 0 + i] = a;\n            tmpBLK32[B_SIZE * 1 + i] = a;\n            tmpBLK32[B_SIZE * 2 + i] = a;\n            tmpBLK32[B_SIZE * 3 + i] = a;\n            tmpBLK32[B_SIZE * 4 + i] = a;\n            tmpBLK32[B_SIZE * 5 + i] = a;\n            tmpBLK32[B_SIZE * 6 + i] = a;\n            tmpBLK32[B_SIZE * 7 + i] = a;\n        }\n        else\n        {\n            r0 = (blk[8 * 0 + i] << 11) + 128;\n\n            /* first stage */\n\n            r8 = W7 * (r4 + r5);\n            r4 = (r8 + (W1 - W7) * r4);\n            /* Multiplication with Wi increases the net left */\n            /* shift from 11 to 14,we have to shift back by 3*/\n            r5 = (r8 - (W1 + W7) * r5);\n            r8 = W3 * (r6 + r7);\n            r6 = (r8 - (W3 - W5) * r6);\n            r7 = (r8 - (W3 + W5) * r7);\n\n            /* second stage */\n            r8 = r0 + r1;\n            r0 -= r1;\n\n            r1 = W6 * (r3 + r2);\n            r2 = (r1 - (W2 + W6) * r2);\n            r3 = (r1 + (W2 - W6) * r3);\n\n            r1 = r4 + r6;\n            r4 -= r6;\n            r6 = r5 + r7;\n            r5 -= r7;\n\n            /* third stage */\n            r7 = r8 + r3;\n            r8 -= r3;\n            r3 = r0 + r2;\n            r0 -= r2;\n            r2 = (181 * (r4 + r5) + 128) >> 8;  /* rounding */\n            r4 = (181 * (r4 - r5) + 128) >> 8;\n\n            /* fourth stage */\n            /* net shift of IDCT is >>3 after the following */\n            /* shift operation, it makes output of 2-D IDCT */\n            /* scaled by 1/8, that is scaled twice by       */\n            /* 1/(2*sqrt(2)) for row IDCT and column IDCT.  */\n            /* see detail analysis in design doc.           
*/\n            tmpBLK32[0 + i] = (r7 + r1) >> 8;\n            tmpBLK32[(1<<3) + i] = (r3 + r2) >> 8;\n            tmpBLK32[(2<<3) + i] = (r0 + r4) >> 8;\n            tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8;\n            tmpBLK32[(4<<3) + i] = (r8 - r6) >> 8;\n            tmpBLK32[(5<<3) + i] = (r0 - r4) >> 8;\n            tmpBLK32[(6<<3) + i] = (r3 - r2) >> 8;\n            tmpBLK32[(7<<3) + i] = (r7 - r1) >> 8;\n        }\n    }\n    /* row (horizontal) IDCT */\n    for (i = 0 ; i < B_SIZE; i++)\n    {\n        /* initialize butterfly nodes at the first stage */\n\n        r1 = ((int32)tmpBLK32[4+(i<<3)]) << 8;\n        /* r1 left shift by 11 is to maintain the same  */\n        /* scale as that of coefficients (W1,...W7) */\n        /* since blk[4] won't multiply with Wi.     */\n        /* see detail diagram in design document.   */\n\n        r2 = tmpBLK32[6+(i<<3)];\n        r3 = tmpBLK32[2+(i<<3)];\n        r4 = tmpBLK32[1+(i<<3)];\n        r5 = tmpBLK32[7+(i<<3)];\n        r6 = tmpBLK32[5+(i<<3)];\n        r7 = tmpBLK32[3+(i<<3)];\n\n        if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))\n        {\n            /* shortcut */\n            /* execute if values of F(1,v) to F(7,v) in a row*/\n            /* are all zeros */\n\n            /* output of row IDCT scaled by 8 */\n            a = (((int32)tmpBLK32[0+(i<<3)] + 32) >> 6);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n            *comp++ = a;\n\n            comp += offset;\n        }\n\n        else\n        {\n            /* for proper rounding in the fourth stage */\n            r0 = (((int32)tmpBLK32[0+(i<<3)]) << 8) + 8192;\n\n            /* first stage */\n\n            r8 = W7 * (r4 + r5) + 4;\n            r4 = (r8 + (W1 - W7) * r4) >> 3;\n            r5 = (r8 - (W1 + W7) * r5) >> 3;\n\n            r8 = W3 * (r6 + r7) + 4;\n            r6 = 
(r8 - (W3 - W5) * r6) >> 3;\n            r7 = (r8 - (W3 + W5) * r7) >> 3;\n\n            /* second stage */\n            r8 = r0 + r1;\n            r0 -= r1;\n\n            r1 = W6 * (r3 + r2) + 4;\n            r2 = (r1 - (W2 + W6) * r2) >> 3;\n            r3 = (r1 + (W2 - W6) * r3) >> 3;\n\n            r1 = r4 + r6;\n            r4 -= r6;\n            r6 = r5 + r7;\n            r5 -= r7;\n\n            /* third stage */\n            r7 = r8 + r3;\n            r8 -= r3;\n            r3 = r0 + r2;\n            r0 -= r2;\n            r2 = (181 * (r4 + r5) + 128) >> 8;    /* rounding */\n            r4 = (181 * (r4 - r5) + 128) >> 8;\n\n            /* fourth stage */\n            /* net shift of this function is <<3 after the    */\n            /* following shift operation, it makes output of  */\n            /* row IDCT scaled by 8 to retain 3 bits precision*/\n            a = ((r7 + r1) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r3 + r2) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r0 + r4) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r8 + r6) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r8 - r6) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r0 - r4) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r3 - r2) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n            a = ((r7 - r1) >> 14);\n            CLIP_RESULT(a)\n            *comp++ = a;\n\n            comp += offset;\n        }\n    }\n\n\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\nvoid idct(\n    int *blk, uint8 *pred, uint8 *dst, int width)\n{\n    
/*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int i;\n    int32   tmpBLK[64];\n    int32   *tmpBLK32 = &tmpBLK[0];\n    int32   r0, r1, r2, r3, r4, r5, r6, r7, r8; /* butterfly nodes */\n    int32   a;\n    int res;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* two dimensional inverse discrete cosine transform */\n\n\n    /* column (vertical) IDCT */\n    for (i = B_SIZE - 1; i >= 0; i--)\n    {\n        /* initialize butterfly nodes at first stage */\n\n        r1 = blk[B_SIZE * 4 + i] << 11;\n        /* since row IDCT results have net left shift by 3 */\n        /* this left shift by 8 gives net left shift by 11 */\n        /* in order to maintain the same scale as that of  */\n        /* coefficients Wi */\n\n        r2 = blk[B_SIZE * 6 + i];\n        r3 = blk[B_SIZE * 2 + i];\n        r4 = blk[B_SIZE * 1 + i];\n        r5 = blk[B_SIZE * 7 + i];\n        r6 = blk[B_SIZE * 5 + i];\n        r7 = blk[B_SIZE * 3 + i];\n\n        if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))\n        {\n            /* shortcut */\n            /* execute if values of g(r,1) to g(r,7) in a column*/\n            /* are all zeros */\n\n            /* make output of IDCT >>3 or scaled by 1/8 and */\n            /* with the proper rounding */\n            a = (blk[B_SIZE * 0 + i]) << 3;\n            tmpBLK32[B_SIZE * 0 + i] = a;\n            tmpBLK32[B_SIZE * 1 + i] = a;\n            tmpBLK32[B_SIZE * 2 + i] = a;\n            tmpBLK32[B_SIZE * 3 + i] = a;\n            tmpBLK32[B_SIZE * 4 + i] = a;\n            tmpBLK32[B_SIZE * 5 + i] = a;\n            tmpBLK32[B_SIZE * 6 + i] = a;\n            tmpBLK32[B_SIZE * 7 + i] = a;\n        }\n        else\n        {\n            r0 = (blk[8 * 
0 + i] << 11) + 128;\n\n            /* first stage */\n\n            r8 = W7 * (r4 + r5);\n            r4 = (r8 + (W1 - W7) * r4);\n            /* Multiplication with Wi increases the net left */\n            /* shift from 11 to 14,we have to shift back by 3*/\n            r5 = (r8 - (W1 + W7) * r5);\n            r8 = W3 * (r6 + r7);\n            r6 = (r8 - (W3 - W5) * r6);\n            r7 = (r8 - (W3 + W5) * r7);\n\n            /* second stage */\n            r8 = r0 + r1;\n            r0 -= r1;\n\n            r1 = W6 * (r3 + r2);\n            r2 = (r1 - (W2 + W6) * r2);\n            r3 = (r1 + (W2 - W6) * r3);\n\n            r1 = r4 + r6;\n            r4 -= r6;\n            r6 = r5 + r7;\n            r5 -= r7;\n\n            /* third stage */\n            r7 = r8 + r3;\n            r8 -= r3;\n            r3 = r0 + r2;\n            r0 -= r2;\n            r2 = (181 * (r4 + r5) + 128) >> 8;  /* rounding */\n            r4 = (181 * (r4 - r5) + 128) >> 8;\n\n            /* fourth stage */\n            /* net shift of IDCT is >>3 after the following */\n            /* shift operation, it makes output of 2-D IDCT */\n            /* scaled by 1/8, that is scaled twice by       */\n            /* 1/(2*sqrt(2)) for row IDCT and column IDCT.  */\n            /* see detail analysis in design doc.           
*/\n            tmpBLK32[0 + i] = (r7 + r1) >> 8;\n            tmpBLK32[(1<<3) + i] = (r3 + r2) >> 8;\n            tmpBLK32[(2<<3) + i] = (r0 + r4) >> 8;\n            tmpBLK32[(3<<3) + i] = (r8 + r6) >> 8;\n            tmpBLK32[(4<<3) + i] = (r8 - r6) >> 8;\n            tmpBLK32[(5<<3) + i] = (r0 - r4) >> 8;\n            tmpBLK32[(6<<3) + i] = (r3 - r2) >> 8;\n            tmpBLK32[(7<<3) + i] = (r7 - r1) >> 8;\n        }\n    }\n    /* row (horizontal) IDCT */\n    for (i = B_SIZE - 1; i >= 0; i--)\n    {\n        /* initialize butterfly nodes at the first stage */\n\n        r1 = ((int32)tmpBLK32[4+(i<<3)]) << 8;\n        /* r1 left shift by 11 is to maintain the same  */\n        /* scale as that of coefficients (W1,...W7) */\n        /* since blk[4] won't multiply with Wi.     */\n        /* see detail diagram in design document.   */\n\n        r2 = tmpBLK32[6+(i<<3)];\n        r3 = tmpBLK32[2+(i<<3)];\n        r4 = tmpBLK32[1+(i<<3)];\n        r5 = tmpBLK32[7+(i<<3)];\n        r6 = tmpBLK32[5+(i<<3)];\n        r7 = tmpBLK32[3+(i<<3)];\n\n        if (!(r1 | r2 | r3 | r4 | r5 | r6 | r7))\n        {\n            /* shortcut */\n            /* execute if values of F(1,v) to F(7,v) in a row*/\n            /* are all zeros */\n\n            /* output of row IDCT scaled by 8 */\n            a = (tmpBLK32[0+(i<<3)] + 32) >> 6;\n            blk[0+(i<<3)] = a;\n            blk[1+(i<<3)] = a;\n            blk[2+(i<<3)] = a;\n            blk[3+(i<<3)] = a;\n            blk[4+(i<<3)] = a;\n            blk[5+(i<<3)] = a;\n            blk[6+(i<<3)] = a;\n            blk[7+(i<<3)] = a;\n\n        }\n\n        else\n        {\n            /* for proper rounding in the fourth stage */\n            r0 = (((int32)tmpBLK32[0+(i<<3)]) << 8) + 8192;\n\n            /* first stage */\n\n            r8 = W7 * (r4 + r5) + 4;\n            r4 = (r8 + (W1 - W7) * r4) >> 3;\n            r5 = (r8 - (W1 + W7) * r5) >> 3;\n\n            r8 = W3 * (r6 + r7) + 4;\n            r6 = (r8 - (W3 - 
W5) * r6) >> 3;\n            r7 = (r8 - (W3 + W5) * r7) >> 3;\n\n            /* second stage */\n            r8 = r0 + r1;\n            r0 -= r1;\n\n            r1 = W6 * (r3 + r2) + 4;\n            r2 = (r1 - (W2 + W6) * r2) >> 3;\n            r3 = (r1 + (W2 - W6) * r3) >> 3;\n\n            r1 = r4 + r6;\n            r4 -= r6;\n            r6 = r5 + r7;\n            r5 -= r7;\n\n            /* third stage */\n            r7 = r8 + r3;\n            r8 -= r3;\n            r3 = r0 + r2;\n            r0 -= r2;\n            r2 = (181 * (r4 + r5) + 128) >> 8;    /* rounding */\n            r4 = (181 * (r4 - r5) + 128) >> 8;\n\n            /* fourth stage */\n            /* net shift of this function is <<3 after the    */\n            /* following shift operation, it makes output of  */\n            /* row IDCT scaled by 8 to retain 3 bits precision*/\n            blk[0+(i<<3)] = (r7 + r1) >> 14;\n            blk[1+(i<<3)] = (r3 + r2) >> 14;\n            blk[2+(i<<3)] = (r0 + r4) >> 14;\n            blk[3+(i<<3)] = (r8 + r6) >> 14;\n            blk[4+(i<<3)] = (r8 - r6) >> 14;\n            blk[5+(i<<3)] = (r0 - r4) >> 14;\n            blk[6+(i<<3)] = (r3 - r2) >> 14;\n            blk[7+(i<<3)] = (r7 - r1) >> 14;\n        }\n        /*  add with prediction ,  08/03/05 */\n        res = (*pred++ + block[0+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[1+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[2+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[3+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[4+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[5+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[6+(i<<3)]);\n        CLIP_RESULT(res);\n        *dst++ = res;\n        res = (*pred++ + block[7+(i<<3)]);\n        
CLIP_RESULT(res);\n        *dst++ = res;\n\n        pred += 8;\n        dst += (width - 8);\n    }\n\n\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\n#endif\n/*----------------------------------------------------------------------------\n; End Function: idct\n----------------------------------------------------------------------------*/\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/idct.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef idct_h\n#define idct_h\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n#define INTEGER_IDCT\n\n#ifdef FAST_IDCT\n#ifndef INTEGER_IDCT\n#define INTEGER_IDCT\n#endif\n#endif\n\n#ifdef FAST_IDCT\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    void idctrow0(int16 *blk, uint8 *pred, uint8 *dst, int width);\n    void idctrow1(int16 *blk, uint8 *pred, uint8 *dst, int width);\n    void idctrow2(int16 *blk, uint8 *pred, uint8 *dst, int width);\n    void idctrow3(int16 *blk, uint8 *pred, uint8 *dst, int width);\n    void idctrow4(int16 *blk, uint8 *pred, uint8 *dst, int width);\n    void idctcol0(int16 *blk);\n    void 
idctcol1(int16 *blk);\n    void idctcol2(int16 *blk);\n    void idctcol3(int16 *blk);\n    void idctcol4(int16 *blk);\n\n    void idctrow0_intra(int16 *blk, PIXEL *comp, int width);\n    void idctrow1_intra(int16 *blk, PIXEL *comp, int width);\n    void idctrow2_intra(int16 *blk, PIXEL *comp, int width);\n    void idctrow3_intra(int16 *blk, PIXEL *comp, int width);\n    void idctrow4_intra(int16 *blk, PIXEL *comp, int width);\n#ifdef __cplusplus\n}\n#endif\n#endif\n\n/* this code assumes \">>\" to be a two's-complement arithmetic */\n/* right shift: (-2)>>1 == -1 , (-3)>>1 == -2                 */\n\n/* a positive real constant is converted to an integer scaled by 2048 */\n/* or equivalent to left shift by 11 */\n\n#define W1 2841                 /* 2048*sqrt(2)*cos(1*pi/16) */\n#define W2 2676                 /* 2048*sqrt(2)*cos(2*pi/16) */\n#define W3 2408                 /* 2048*sqrt(2)*cos(3*pi/16) */\n#define W5 1609                 /* 2048*sqrt(2)*cos(5*pi/16) */\n#define W6 1108                 /* 2048*sqrt(2)*cos(6*pi/16) */\n#define W7 565                  /* 2048*sqrt(2)*cos(7*pi/16) */\n#define W1mW7 2276\n#define W1pW7 3406\n#define W5mW3 -799\n#define mW3mW5 -4017\n#define mW2mW6 -3784\n#define W2mW6 1568\n\n/* left shift by 11 is to maintain the accuracy of the decimal point */\n/* for the transform coefficients (W1,...W7) */\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; SIMPLE TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; ENUMERATED 
TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; STRUCTURES TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; GLOBAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; END\n----------------------------------------------------------------------------*/\n#endif\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/idct_vca.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"idct.h\"\n#include \"motion_comp.h\"\n\n#ifdef FAST_IDCT\n\n/****************************************************************\n*       vca_idct.c : created 6/1/99 for several options\n*                     of hard-coded reduced idct function (using nz_coefs)\n******************************************************************/\n\n/*****************************************************/\n//pretested version\nvoid idctrow0(int16 *blk, uint8 *pred, uint8 *dst, int width)\n{\n    OSCL_UNUSED_ARG(blk);\n    OSCL_UNUSED_ARG(width);\n    OSCL_UNUSED_ARG(dst);\n    OSCL_UNUSED_ARG(pred);\n    return ;\n}\nvoid idctcol0(int16 *blk)\n{\n    OSCL_UNUSED_ARG(blk);\n    return ;\n}\n\nvoid idctrow1(int16 *blk, uint8 *pred, uint8 *dst, int width)\n{\n    /* shortcut */\n    int tmp;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    width -= 4;\n    dst -= width;\n    pred -= 12;\n    blk -= 8;\n\n    while (i--)\n    {\n        tmp = (*(blk += 8) + 32) >> 6;\n        *blk = 0;\n\n        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */\n        
res = tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp + ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */\n        res = tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp + ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return;\n}\n\nvoid idctcol1(int16 *blk)\n{ /* shortcut */\n    blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] =\n                                              blk[0] << 3;\n    return;\n}\n\nvoid idctrow2(int16 *blk, uint8 *pred, uint8 *dst, int width)\n{\n    int32 x0, x1, x2, x4, x5;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    width -= 4;\n    dst -= width;\n    pred -= 12;\n    blk -= 8;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n   
     /* fourth stage */\n        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 + x5) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */\n        res = (x0 - x5) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x1) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x4) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\nvoid idctcol2(int16 *blk)\n{\n    int32 x0, x1, x3, x5, x7;//, x8;\n\n    x1 = blk[8];\n    x0 = ((int32)blk[0] << 11) + 128;\n    /* both upper and lower*/\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n\n    x3 = x7;\n    x5 = (181 * (x1 - x7) + 128) >> 8;\n    x7 = (181 * (x1 + x7) + 128) >> 8;\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x0 + x7) >> 8;\n    blk[16] = (x0 + x5) >> 8;\n    blk[24] = (x0 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x0 - x7) >> 8;\n    blk[40] = (x0 - x5) >> 8;\n    blk[32] = (x0 - x3) >> 8;\n\n    return ;\n}\n\nvoid idctrow3(int16 *blk, uint8 *pred, uint8 *dst, int width)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    width -= 4;\n    dst -= width;\n    pred -= 12;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = 
blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x3 = x7;\n        x5 = (181 * (x1 - x7) + 128) >> 8;\n        x7 = (181 * (x1 + x7) + 128) >> 8;\n\n        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += width)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */\n    }\n\n    return ;\n}\n\nvoid idctcol3(int16 *blk)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n\n    x2 = blk[16];\n    x1 = blk[8];\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    x4 = x0;\n    x6 = W6 * x2;\n    x2 = W2 * x2;\n    x8 = x0 - x2;\n    x0 += x2;\n    x2 = x8;\n    x8 = x4 - x6;\n    x4 += x6;\n    x6 = x8;\n\n    x7 = W7 * 
x1;\n    x1 = W1 * x1;\n    x3 = x7;\n    x5 = (181 * (x1 - x7) + 128) >> 8;\n    x7 = (181 * (x1 + x7) + 128) >> 8;\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x4 + x7) >> 8;\n    blk[16] = (x6 + x5) >> 8;\n    blk[24] = (x2 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x4 - x7) >> 8;\n    blk[40] = (x6 - x5) >> 8;\n    blk[32] = (x2 - x3) >> 8;\n\n    return;\n}\n\n\nvoid idctrow4(int16 *blk, uint8 *pred, uint8 *dst, int width)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    width -= 4;\n    dst -= width;\n    pred -= 12;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x3 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;    /* for proper rounding in the fourth stage */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x5 = (W3 * x3 + 4) >> 3;\n        x3 = (- W5 * x3 + 4) >> 3;\n        x8 = x1 - x5;\n        x1 += x5;\n        x5 = x8;\n        x8 = x7 - x3;\n        x3 += x7;\n        x7 = (181 * (x5 + x8) + 128) >> 8;\n        x5 = (181 * (x5 - x8) + 128) >> 8;\n\n        pred_word = *((uint32*)(pred += 12)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 
width)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred += 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(dst += 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\nvoid idctcol4(int16 *blk)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    x2 = blk[16];\n    x1 = blk[8];\n    x3 = blk[24];\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    x4 = x0;\n    x6 = W6 * x2;\n    x2 = W2 * x2;\n    x8 = x0 - x2;\n    x0 += x2;\n    x2 = x8;\n    x8 = x4 - x6;\n    x4 += x6;\n    x6 = x8;\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n    x5 = W3 * x3;\n    x3 = -W5 * x3;\n    x8 = x1 - x5;\n    x1 += x5;\n    x5 = x8;\n    x8 = x7 - x3;\n    x3 += x7;\n    x7 = (181 * (x5 + x8) + 128) >> 8;\n    x5 = (181 * (x5 - x8) + 128) >> 8;\n\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x4 + x7) >> 8;\n    blk[16] = (x6 + x5) >> 8;\n    blk[24] = (x2 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x4 - x7) >> 8;\n    blk[40] = (x6 - x5) >> 8;\n    blk[32] = (x2 - x3) >> 8;\n\n    return ;\n}\n\nvoid idctrow0_intra(int16 *blk, PIXEL * comp, int width)\n{\n    OSCL_UNUSED_ARG(blk);\n    OSCL_UNUSED_ARG(comp);\n    OSCL_UNUSED_ARG(width);\n    return ;\n}\n\nvoid idctrow1_intra(int16 *blk, PIXEL *comp, int width)\n{\n    /* shortcut */\n    int32 tmp;\n    int i = 8;\n    int offset = width;\n    uint32 word;\n\n    comp -= offset;\n    while (i--)\n    {\n        tmp = ((blk[0] + 32) >> 6);\n        blk[0] = 0;\n        CLIP_RESULT(tmp)\n\n        word = (tmp << 8) | tmp;\n        word = (word << 16) | word;\n\n        *((uint32*)(comp += offset)) = word;\n        
*((uint32*)(comp + 4)) = word;\n\n\n\n\n        blk += B_SIZE;\n    }\n    return;\n}\n\nvoid idctrow2_intra(int16 *blk, PIXEL *comp, int width)\n{\n    int32 x0, x1, x2, x4, x5, temp;\n    int i = 8;\n    int offset = width;\n    int32 word;\n\n    comp -= offset;\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[1];\n        blk[1] = 0;\n        x0 = ((int32)blk[0] << 8) + 8192;\n        blk[0] = 0;   /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        word = ((x0 + x4) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x0 + x2) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n        temp = ((x0 + x1) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n        temp = ((x0 + x5) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp += offset)) = word;\n\n        word = ((x0 - x5) >> 14);\n        CLIP_RESULT(word)\n        temp = ((x0 - x1) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n        temp = ((x0 - x2) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n        temp = ((x0 - x4) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp + 4)) = word;\n\n        blk += B_SIZE;\n    }\n    return ;\n}\n\nvoid idctrow3_intra(int16 *blk, PIXEL *comp, int width)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;\n    int i = 8;\n    int offset = width;\n    int32 word;\n\n    comp -= offset;\n\n    while (i--)\n    {\n        x2 = blk[2];\n        blk[2] = 0;\n        x1 = blk[1];\n        blk[1] = 0;\n        x0 = ((int32)blk[0] << 8) + 8192;\n        blk[0] = 0;/* for proper rounding in the fourth stage */\n        /* 
both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x3 = x7;\n        x5 = (181 * (x1 - x7) + 128) >> 8;\n        x7 = (181 * (x1 + x7) + 128) >> 8;\n\n        word = ((x0 + x1) >> 14);\n        CLIP_RESULT(word)\n        temp = ((x4 + x7) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n\n        temp = ((x6 + x5) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x2 + x3) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp += offset)) = word;\n\n        word = ((x2 - x3) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x6 - x5) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n        temp = ((x4 - x7) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x0 - x1) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp + 4)) = word;\n\n        blk += B_SIZE;\n    }\n    return ;\n}\n\nvoid idctrow4_intra(int16 *blk, PIXEL *comp, int width)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8, temp;\n    int i = 8;\n    int offset = width;\n    int32 word;\n\n    comp -= offset;\n\n    while (i--)\n    {\n        x2 = blk[2];\n        blk[2] = 0;\n        x1 = blk[1];\n        blk[1] = 0;\n        x3 = blk[3];\n        blk[3] = 0;\n        x0 = ((int32)blk[0] << 8) + 8192;\n        blk[0] = 0;/* for proper rounding in the fourth stage */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 
* x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x5 = (W3 * x3 + 4) >> 3;\n        x3 = (- W5 * x3 + 4) >> 3;\n        x8 = x1 - x5;\n        x1 += x5;\n        x5 = x8;\n        x8 = x7 - x3;\n        x3 += x7;\n        x7 = (181 * (x5 + x8) + 128) >> 8;\n        x5 = (181 * (x5 - x8) + 128) >> 8;\n\n        word = ((x0 + x1) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x4 + x7) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n\n        temp = ((x6 + x5) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x2 + x3) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp += offset)) = word;\n\n        word = ((x2 - x3) >> 14);\n        CLIP_RESULT(word)\n\n        temp = ((x6 - x5) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 8);\n\n        temp = ((x4 - x7) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 16);\n\n        temp = ((x0 - x1) >> 14);\n        CLIP_RESULT(temp)\n        word = word | (temp << 24);\n        *((int32*)(comp + 4)) = word;\n\n        blk += B_SIZE;\n    }\n\n    return ;\n}\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/max_level.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n*     -------------------------------------------------------------------   *\n*                    MPEG-4 Simple Profile Video Decoder                    *\n*     -------------------------------------------------------------------   *\n*\n* This software module was originally developed by\n*\n*   Michael Wollborn (TUH / ACTS-MoMuSyS)\n*\n* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n* This software module is an implementation of a part of one or more MPEG-4\n* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n* license to this software module or modifications thereof for use in hardware\n* or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* Those intending to use this software module in hardware or software products\n* are advised that its use may infringe existing patents. 
The original\n* developer of this software module and his/her company, the subsequent\n* editors and their companies, and ISO/IEC have no liability for use of this\n* software module or modifications thereof in an implementation. Copyright is\n* not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming\n* products.\n*\n* ACTS-MoMuSys partners retain full right to use the code for his/her own\n* purpose, assign or donate the code to a third party and to inhibit third\n* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard\n* conforming products. This copyright notice must be included in all copies or\n* derivative works.\n*\n* Copyright (c) 1997\n*\n*****************************************************************************\n\nThis is a header file for \"vlc_decode.c\".  The table data actually resides\nin \"vlc_tab.c\".\n\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; CONTINUE ONLY IF NOT ALREADY DEFINED\n----------------------------------------------------------------------------*/\n\n#ifndef max_level_H\n#define max_level_H\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4def.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined 
elsewhere\n----------------------------------------------------------------------------*/\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    extern const int intra_max_level[2][NCOEFF_BLOCK];\n\n    extern const int inter_max_level[2][NCOEFF_BLOCK];\n\n    extern const int intra_max_run0[28];\n\n\n    extern const int intra_max_run1[9];\n\n    extern const int inter_max_run0[13];\n\n\n    extern const int inter_max_run1[4];\n\n\n    /*----------------------------------------------------------------------------\n    ; SIMPLE TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; ENUMERATED TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; STRUCTURES TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n\n    /*----------------------------------------------------------------------------\n    ; GLOBAL FUNCTION DEFINITIONS\n    ; Function Prototype declaration\n    ----------------------------------------------------------------------------*/\n\n\n    /*----------------------------------------------------------------------------\n    ; END\n    ----------------------------------------------------------------------------*/\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mb_motion_comp.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    video = pointer to structure of type VideoDecData\n\n Local Stores/Buffers/Pointers Needed:\n    roundtab16 = rounding table\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    None\n\n Pointers and Buffers Modified:\n    video->currVop->yChan contents are the newly calculated luminance\n      data\n    video->currVop->uChan contents are the newly calculated chrominance\n      b data\n    video->currVop->vChan contents are the newly calculated chrominance\n      r data\n    video->pstprcTypCur contents are the updated semaphore propagation\n      values\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This function performs high level motion compensation on the luminance and\n chrominance data. It sets up all the parameters required by the functions\n that perform luminance and chrominance prediction and it initializes the\n pointer to the post processing semaphores of a given block. 
It also checks\n the motion compensation mode in order to determine which luminance or\n chrominance prediction functions to call and determines how the post\n processing semaphores are updated.\n\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n#include \"motion_comp.h\"\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n/* 09/29/2000 bring this from mp4def.h */\n// const static int roundtab4[] = {0,1,1,1};\n// const static int roundtab8[] = {0,0,1,1,1,1,1,2};\n/*** 10/30 for TPS */\n// const static int roundtab12[] = {0,0,0,1,1,1,1,1,1,1,2,2};\n/* 10/30 for TPS ***/\nconst static int roundtab16[] = {0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2};\n\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this 
module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\n\n/** modified 3 August 2005 to do prediction and put the results in\nvideo->mblock->pred_block, no adding with residue */\n\nvoid  MBMotionComp(\n    VideoDecData *video,\n    int CBP\n)\n{\n\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    /* Previous Video Object Plane */\n    Vop *prev = video->prevVop;\n\n    /* Current Macroblock (MB) in the VOP */\n    int mbnum = video->mbnum;\n\n    /* Number of MB per data row */\n    int MB_in_width = video->nMBPerRow;\n    int ypos, xpos;\n    PIXEL *c_comp, *c_prev;\n    PIXEL *cu_comp, *cu_prev;\n    PIXEL *cv_comp, *cv_prev;\n    int height, width, pred_width;\n    int imv, mvwidth;\n    int32 offset;\n    uint8 mode;\n    uint8 *pred_block, *pred;\n\n    /* Motion vector (dx,dy) in half-pel resolution */\n    int dx, dy;\n\n    MOT px[4], py[4];\n    int xpred, ypred;\n    int xsum;\n    int round1;\n#ifdef PV_POSTPROC_ON // 2/14/2001      \n    /* Total number of pixels in the VOL */\n    int32 size = (int32) video->nTotalMB << 8;\n    uint8 *pp_dec_y, *pp_dec_u;\n    int ll[4];\n    int tmp = 0;\n    uint8 msk_deblock = 0;\n#endif\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n   
 /* Set rounding type */\n    /* change from array to single 09/29/2000 */\n    round1 = (int)(1 - video->currVop->roundingType);\n\n    /* width of luminance data in pixels (y axis) */\n    width = video->width;\n\n    /* heigth of luminance data in pixels (x axis) */\n    height = video->height;\n\n    /* number of blocks per row */\n    mvwidth = MB_in_width << 1;\n\n    /* starting y position in current MB; origin of MB */\n    ypos = video->mbnum_row << 4 ;\n    /* starting x position in current MB; origin of MB */\n    xpos = video->mbnum_col << 4 ;\n\n    /* offset to (x,y) position in current luminance MB */\n    /* in pixel resolution                              */\n    /* ypos*width -> row, +x -> column */\n    offset = (int32)ypos * width + xpos;\n\n    /* get mode for current MB */\n    mode = video->headerInfo.Mode[mbnum];\n\n    /* block index */\n    /* imv = (xpos/8) + ((ypos/8) * mvwidth) */\n    imv = (offset >> 6) - (xpos >> 6) + (xpos >> 3);\n    if (mode & INTER_1VMASK)\n    {\n        dx = px[0] = px[1] = px[2] = px[3] = video->motX[imv];\n        dy = py[0] = py[1] = py[2] = py[3] = video->motY[imv];\n        if ((dx & 3) == 0)\n        {\n            dx = dx >> 1;\n        }\n        else\n        {\n            /* x component of MV is or'ed for rounding (?) */\n            dx = (dx >> 1) | 1;\n        }\n\n        /* y component of motion vector; divide by 2 for to */\n        /* convert to full-pel resolution.                  */\n        if ((dy & 3) == 0)\n        {\n            dy = dy >> 1;\n        }\n        else\n        {\n            /* y component of MV is or'ed for rounding (?) 
*/\n            dy = (dy >> 1) | 1;\n        }\n    }\n    else\n    {\n        px[0] = video->motX[imv];\n        px[1] = video->motX[imv+1];\n        px[2] = video->motX[imv+mvwidth];\n        px[3] = video->motX[imv+mvwidth+1];\n        xsum = px[0] + px[1] + px[2] + px[3];\n        dx = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] +\n                              (((PV_ABS(xsum)) >> 4) << 1));\n        py[0] = video->motY[imv];\n        py[1] = video->motY[imv+1];\n        py[2] = video->motY[imv+mvwidth];\n        py[3] = video->motY[imv+mvwidth+1];\n        xsum = py[0] + py[1] + py[2] + py[3];\n        dy = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] +\n                              (((PV_ABS(xsum)) >> 4) << 1));\n    }\n\n    /* Pointer to previous luminance frame */\n    c_prev  = prev->yChan;\n\n    pred_block = video->mblock->pred_block;\n\n    /* some blocks have no residue or INTER4V */\n    /*if (mode == MODE_INTER4V)   05/08/15 */\n    /* Motion Compensation for an 8x8 block within a MB */\n    /* (4 MV per MB) */\n\n\n\n    /* Call function that performs luminance prediction */\n    /*      luminance_pred_mode_inter4v(xpos, ypos, px, py, c_prev,\n                    video->mblock->pred_block, width, height,\n                    round1, mvwidth, &xsum, &ysum);*/\n    c_comp = video->currVop->yChan + offset;\n\n\n    xpred = (int)((xpos << 1) + px[0]);\n    ypred = (int)((ypos << 1) + py[0]);\n\n    if ((CBP >> 5)&1)\n    {\n        pred = pred_block;\n        pred_width = 16;\n    }\n    else\n    {\n        pred = c_comp;\n        pred_width = width;\n    }\n\n    /* check whether the MV points outside the frame */\n    if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) &&\n            ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE)))\n    {   /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n        ;\n        GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) 
+ ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n    }\n    else\n    {   /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n        GetPredOutside(xpred, ypred, c_prev,\n                       pred, width, height, round1, pred_width);\n    }\n\n\n    /* Compute prediction values over current luminance MB */\n    /* (blocks 1); add motion vector prior to input;       */\n    /* add 8 to x_pos to advance to next block         */\n    xpred = (int)(((xpos + B_SIZE) << 1) + px[1]);\n    ypred = (int)((ypos << 1) + py[1]);\n\n    if ((CBP >> 4)&1)\n    {\n        pred = pred_block + 8;\n        pred_width = 16;\n    }\n    else\n    {\n        pred = c_comp + 8;\n        pred_width = width;\n    }\n\n    /* check whether the MV points outside the frame */\n    if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) &&\n            ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE)))\n    {   /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n        GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n    }\n    else\n    {   /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n        GetPredOutside(xpred, ypred, c_prev,\n                       pred, width, height, round1, pred_width);\n    }\n\n\n\n    /* Compute prediction values over current luminance MB */\n    /* (blocks 2); add motion vector prior to input        */\n    /* add 8 to y_pos to advance to block on next row      */\n    xpred = (int)((xpos << 1) + px[2]);\n    ypred = (int)(((ypos + B_SIZE) << 1) + py[2]);\n\n    if ((CBP >> 3)&1)\n    {\n        pred = pred_block + 128;\n        pred_width = 16;\n    }\n    else\n    {\n        pred 
= c_comp + (width << 3);\n        pred_width = width;\n    }\n\n    /* check whether the MV points outside the frame */\n    if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) &&\n            ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE)))\n    {   /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n        GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n    }\n    else\n    {   /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n        GetPredOutside(xpred, ypred, c_prev,\n                       pred, width, height, round1, pred_width);\n    }\n\n\n\n    /* Compute prediction values over current luminance MB */\n    /* (blocks 3); add motion vector prior to input;       */\n    /* add 8 to x_pos and y_pos to advance to next block   */\n    /* on next row                         */\n    xpred = (int)(((xpos + B_SIZE) << 1) + px[3]);\n    ypred = (int)(((ypos + B_SIZE) << 1) + py[3]);\n\n    if ((CBP >> 2)&1)\n    {\n        pred = pred_block + 136;\n        pred_width = 16;\n    }\n    else\n    {\n        pred = c_comp + (width << 3) + 8;\n        pred_width = width;\n    }\n\n    /* check whether the MV points outside the frame */\n    if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) &&\n            ypred >= 0 && ypred <= ((height << 1) - (2*B_SIZE)))\n    {   /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n        GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n    }\n    else\n    {   /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n        
GetPredOutside(xpred, ypred, c_prev,\n                       pred, width, height, round1, pred_width);\n    }\n    /* Call function to set de-blocking and de-ringing */\n    /*   semaphores for luminance                      */\n\n#ifdef PV_POSTPROC_ON\n    if (video->postFilterType != PV_NO_POST_PROC)\n    {\n        if (mode&INTER_1VMASK)\n        {\n            pp_dec_y = video->pstprcTypCur + imv;\n            ll[0] = 1;\n            ll[1] = mvwidth - 1;\n            ll[2] = 1;\n            ll[3] = -mvwidth - 1;\n            msk_deblock = pp_semaphore_luma(xpred, ypred, pp_dec_y,\n                                            video->pstprcTypPrv, ll, &tmp, px[0], py[0], mvwidth,\n                                            width, height);\n\n            pp_dec_u = video->pstprcTypCur + (size >> 6) +\n                       ((imv + (xpos >> 3)) >> 2);\n\n            pp_semaphore_chroma_inter(xpred, ypred, pp_dec_u,\n                                      video->pstprcTypPrv, dx, dy, mvwidth, height, size,\n                                      tmp, msk_deblock);\n        }\n        else\n        {\n            /* Post-processing mode (MBM_INTER8) */\n            /* deblocking and deringing) */\n            pp_dec_y = video->pstprcTypCur + imv;\n            *pp_dec_y = 4;\n            *(pp_dec_y + 1) = 4;\n            *(pp_dec_y + mvwidth) = 4;\n            *(pp_dec_y + mvwidth + 1) = 4;\n            pp_dec_u = video->pstprcTypCur + (size >> 6) +\n                       ((imv + (xpos >> 3)) >> 2);\n            *pp_dec_u = 4;\n            pp_dec_u[size>>8] = 4;\n        }\n    }\n#endif\n\n\n    /* xpred and ypred calculation for Chrominance is */\n    /* in full-pel resolution.                        
*/\n\n    /* Chrominance */\n    /* width of chrominance data in pixels (y axis) */\n    width >>= 1;\n\n    /* heigth of chrominance data in pixels (x axis) */\n    height >>= 1;\n\n    /* Pointer to previous chrominance b frame */\n    cu_prev = prev->uChan;\n\n    /* Pointer to previous chrominance r frame */\n    cv_prev = prev->vChan;\n\n    /* x position in prediction data offset by motion vector */\n    /* xpred calculation for Chrominance is in full-pel      */\n    /* resolution.                                           */\n    xpred = xpos + dx;\n\n    /* y position in prediction data offset by motion vector */\n    /* ypred calculation for Chrominance is in full-pel      */\n    /* resolution.                                           */\n    ypred = ypos + dy;\n\n    cu_comp = video->currVop->uChan + (offset >> 2) + (xpos >> 2);\n    cv_comp = video->currVop->vChan + (offset >> 2) + (xpos >> 2);\n\n    /* Call function that performs chrominance prediction */\n    /*      chrominance_pred(xpred, ypred, cu_prev, cv_prev,\n            pred_block, width_uv, height_uv,\n            round1);*/\n    if (xpred >= 0 && xpred <= ((width << 1) - (2*B_SIZE)) && ypred >= 0 &&\n            ypred <= ((height << 1) - (2*B_SIZE)))\n    {\n        /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n        if ((CBP >> 1)&1)\n        {\n            pred = pred_block + 256;\n            pred_width = 16;\n        }\n        else\n        {\n            pred = cu_comp;\n            pred_width = width;\n        }\n\n        /* Compute prediction for Chrominance b (block[4]) */\n        GetPredAdvBTable[ypred&1][xpred&1](cu_prev + (xpred >> 1) + ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n\n        if (CBP&1)\n        {\n            pred = pred_block + 264;\n            pred_width = 16;\n        }\n        else\n        {\n            pred = 
cv_comp;\n            pred_width = width;\n        }\n        /* Compute prediction for Chrominance r (block[5]) */\n        GetPredAdvBTable[ypred&1][xpred&1](cv_prev + (xpred >> 1) + ((ypred >> 1)*width),\n                                           pred, width, (pred_width << 1) | round1);\n\n        return ;\n    }\n    else\n    {\n        /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n        if ((CBP >> 1)&1)\n        {\n            pred = pred_block + 256;\n            pred_width = 16;\n        }\n        else\n        {\n            pred = cu_comp;\n            pred_width = width;\n        }\n\n        /* Compute prediction for Chrominance b (block[4]) */\n        GetPredOutside(xpred, ypred,    cu_prev,\n                       pred, width, height, round1, pred_width);\n\n        if (CBP&1)\n        {\n            pred = pred_block + 264;\n            pred_width = 16;\n        }\n        else\n        {\n            pred = cv_comp;\n            pred_width = width;\n        }\n\n        /* Compute prediction for Chrominance r (block[5]) */\n        GetPredOutside(xpred, ypred,    cv_prev,\n                       pred, width, height, round1, pred_width);\n\n        return ;\n    }\n\n}\n\n/*** special function for skipped macroblock,  Aug 15, 2005 */\nvoid  SkippedMBMotionComp(\n    VideoDecData *video\n)\n{\n    Vop *prev = video->prevVop;\n    Vop *comp;\n    int ypos, xpos;\n    PIXEL *c_comp, *c_prev;\n    PIXEL *cu_comp, *cu_prev;\n    PIXEL *cv_comp, *cv_prev;\n    int width, width_uv;\n    int32 offset;\n#ifdef PV_POSTPROC_ON // 2/14/2001      \n    int imv;\n    int32 size = (int32) video->nTotalMB << 8;\n    uint8 *pp_dec_y, *pp_dec_u;\n    uint8 *pp_prev1;\n    int mvwidth = video->nMBPerRow << 1;\n#endif\n\n    width = video->width;\n    width_uv  = width >> 1;\n    ypos = video->mbnum_row << 4 ;\n    xpos = video->mbnum_col << 4 ;\n    offset = (int32)ypos * width + xpos;\n\n\n 
   /* zero motion compensation for previous frame */\n    /*mby*width + mbx;*/\n    c_prev  = prev->yChan + offset;\n    /*by*width_uv + bx;*/\n    cu_prev = prev->uChan + (offset >> 2) + (xpos >> 2);\n    /*by*width_uv + bx;*/\n    cv_prev = prev->vChan + (offset >> 2) + (xpos >> 2);\n\n    comp = video->currVop;\n\n    c_comp  = comp->yChan + offset;\n    cu_comp = comp->uChan + (offset >> 2) + (xpos >> 2);\n    cv_comp = comp->vChan + (offset >> 2) + (xpos >> 2);\n\n\n    /* Copy previous reconstructed frame into the current frame */\n    PutSKIPPED_MB(c_comp,  c_prev, width);\n    PutSKIPPED_B(cu_comp, cu_prev, width_uv);\n    PutSKIPPED_B(cv_comp, cv_prev, width_uv);\n\n    /*  10/24/2000 post_processing semaphore generation */\n#ifdef PV_POSTPROC_ON // 2/14/2001\n    if (video->postFilterType != PV_NO_POST_PROC)\n    {\n        imv = (offset >> 6) - (xpos >> 6) + (xpos >> 3);\n        /* Post-processing mode (copy previous MB) */\n        pp_prev1 = video->pstprcTypPrv + imv;\n        pp_dec_y = video->pstprcTypCur + imv;\n        *pp_dec_y = *pp_prev1;\n        *(pp_dec_y + 1) = *(pp_prev1 + 1);\n        *(pp_dec_y + mvwidth) = *(pp_prev1 + mvwidth);\n        *(pp_dec_y + mvwidth + 1) = *(pp_prev1 + mvwidth + 1);\n\n        /* chrominance */\n        /*4*MB_in_width*MB_in_height*/\n        pp_prev1 = video->pstprcTypPrv + (size >> 6) +\n                   ((imv + (xpos >> 3)) >> 2);\n        pp_dec_u = video->pstprcTypCur + (size >> 6) +\n                   ((imv + (xpos >> 3)) >> 2);\n        *pp_dec_u = *pp_prev1;\n        pp_dec_u[size>>8] = pp_prev1[size>>8];\n    }\n#endif\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n\n    return;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mb_utils.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n\n/* ====================================================================== /\n    Function : PutSKIPPED_MB()\n    Date     : 04/03/2000\n/ ====================================================================== */\n\nvoid PutSKIPPED_MB(uint8 *comp, uint8 *prev, int width)\n{\n    int32 *temp0, *temp1;\n    int  row;\n    row = MB_SIZE;\n\n\n    while (row)\n    {\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n        temp1[2] = temp0[2];\n        temp1[3] = temp0[3];\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n        temp1[2] = temp0[2];\n        temp1[3] = temp0[3];\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n        temp1[2] = temp0[2];\n        temp1[3] = temp0[3];\n\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n        temp1[0] = temp0[0];\n        
temp1[1] = temp0[1];\n        temp1[2] = temp0[2];\n        temp1[3] = temp0[3];\n\n        comp += width;\n        prev += width;\n        row -= 4;\n    }\n}\n\n\n/* ====================================================================== /\n    Function : PutSKIPPED_B()\n    Date     : 04/03/2000\n/ ====================================================================== */\n\nvoid PutSKIPPED_B(uint8 *comp, uint8 *prev, int width)\n{\n    int32 *temp0, *temp1;\n    int  row;\n\n    row = B_SIZE;\n    while (row)\n    {\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n\n        comp += width;\n        prev += width;\n\n        temp0 = (int32 *)prev;\n        temp1 = (int32 *)comp;\n\n        temp1[0] = temp0[0];\n        temp1[1] = temp0[1];\n\n        comp += width;\n        prev += width;\n        row -= 4;\n    }\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mbtype_mode.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\nconst static int MBtype_mode[] =\n{\n    MODE_INTER,\n    MODE_INTER_Q,\n    MODE_INTER4V,\n    MODE_INTRA,\n    MODE_INTRA_Q,\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    MODE_INTER4V_Q,\n#endif\n    MODE_SKIPPED\n};\n#ifdef PV_ANNEX_IJKT_SUPPORT\nconst static int16 DQ_tab_Annex_T_10[32] = {0, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3};\nconst static int16 DQ_tab_Annex_T_11[32] = {0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, -5};\nconst static int16 MQ_chroma_QP_table[32] = {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13,\n        14, 14, 14, 14, 14, 15, 15, 15, 15, 15\n                                            };\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/motion_comp.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef motion_comp_h\n#define motion_comp_h\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4dec_lib.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n/* CBP Mask defines used in chrominance prediction */\n#define CBP_MASK_CHROMA_BLK4    0x2\n#define CBP_MASK_CHROMA_BLK5    0x1\n\n/* CBP Mask defines used in luminance prediction (MODE_INTER4V) */\n#define CBP_MASK_BLK0_MODE_INTER4V  0x20\n#define CBP_MASK_BLK1_MODE_INTER4V  0x10\n#define CBP_MASK_BLK2_MODE_INTER4V  0x08\n#define CBP_MASK_BLK3_MODE_INTER4V  0x04\n\n/* CBP Mask defines used in luminance prediction (MODE_INTER or MODE_INTER_Q) */\n#define CBP_MASK_MB_MODE_INTER  
0x3c\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#define CLIP_RESULT(x)      if(x & -256){x = 0xFF & (~(x>>31));}\n#define ADD_AND_CLIP1(x)    x += (pred_word&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP2(x)    x += ((pred_word>>8)&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP3(x)    x += ((pred_word>>16)&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP4(x)    x += ((pred_word>>24)&0xFF); CLIP_RESULT(x);\n\n#define ADD_AND_CLIP(x,y)    {  x9 = ~(x>>8); \\\n                            if(x9!=-1){ \\\n                                x9 = ((uint32)x9)>>24; \\\n                                y = x9|(y<<8); \\\n                            } \\\n                            else \\\n                            {    \\\n                                y =  x|(y<<8); \\\n                            } \\\n                            }\n\n\n    static int (*const GetPredAdvBTable[2][2])(uint8*, uint8*, int, int) =\n    {\n        {&GetPredAdvancedBy0x0, &GetPredAdvancedBy0x1},\n        {&GetPredAdvancedBy1x0, &GetPredAdvancedBy1x1}\n    };\n\n    /*----------------------------------------------------------------------------\n    ; SIMPLE TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; ENUMERATED TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; STRUCTURES TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; GLOBAL 
FUNCTION DEFINITIONS\n    ; Function Prototype declaration\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; END\n    ----------------------------------------------------------------------------*/\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mp4dec_lib.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _MP4DECLIB_H_\n#define _MP4DECLIB_H_\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"oscl_mem.h\"\n#include \"mp4def.h\" /* typedef */\n#include \"mp4lib_int.h\" /* main video structure */\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; SIMPLE 
TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; ENUMERATED TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; STRUCTURES TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; GLOBAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif /* __cplusplus */\n\n    /* defined in pvdec_api.c, these function are not supposed to be    */\n    /* exposed to programmers outside PacketVideo.  08/15/2000.    */\n    uint VideoDecoderErrorDetected(VideoDecData *video);\n\n#ifdef ENABLE_LOG\n    void m4vdec_dprintf(char *format, ...);\n#define mp4dec_log(message) m4vdec_dprintf(message)\n#else\n#define mp4dec_log(message)\n#endif\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in frame_buffer.c */\n    PV_STATUS FillFrameBufferNew(BitstreamDecVideo *stream);\n    PV_STATUS FillFrameBuffer(BitstreamDecVideo *stream, int short_header);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in dc_ac_pred.c */\n    int cal_dc_scaler(int QP, int type);\n    PV_STATUS PV_DecodePredictedIntraDC(int compnum, BitstreamDecVideo *stream,\n                                        int16 *IntraDC_delta);\n\n    void    doDCACPrediction(VideoDecData *video, int comp, int16 *q_block,\n                             int *direction);\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    void    doDCACPrediction_I(VideoDecData *video, int comp, int16 *q_block);\n#endif\n    
/*--------------------------------------------------------------------------*/\n    /* defined in block_idct.c */\n    void MBlockIDCTAdd(VideoDecData *video, int nz_coefs[]);\n\n    void BlockIDCT(uint8 *dst, uint8 *pred, int16 *blk, int width, int nzcoefs,\n                   uint8 *bitmapcol, uint8 bitmaprow);\n\n    void MBlockIDCT(VideoDecData *video);\n    void BlockIDCT_intra(MacroBlock *mblock, PIXEL *c_comp, int comp, int width_offset);\n    /*--------------------------------------------------------------------------*/\n    /* defined in combined_decode.c */\n    PV_STATUS DecodeFrameCombinedMode(VideoDecData *video);\n    PV_STATUS GetMBheader(VideoDecData *video, int16 *QP);\n    PV_STATUS GetMBData(VideoDecData *video);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in datapart_decode.c */\n    PV_STATUS DecodeFrameDataPartMode(VideoDecData *video);\n    PV_STATUS GetMBheaderDataPart_DQUANT_DC(VideoDecData *video, int16 *QP);\n    PV_STATUS GetMBheaderDataPart_P(VideoDecData *video);\n    PV_STATUS DecodeDataPart_I_VideoPacket(VideoDecData *video, int slice_counter);\n    PV_STATUS DecodeDataPart_P_VideoPacket(VideoDecData *video, int slice_counter);\n    PV_STATUS GetMBData_DataPart(VideoDecData *video);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in packet_util.c */\n    PV_STATUS PV_ReadVideoPacketHeader(VideoDecData *video, int *next_MB);\n    PV_STATUS RecoverPacketError(BitstreamDecVideo *stream, int marker_length, int32 *nextVop);\n    PV_STATUS RecoverGOBError(BitstreamDecVideo *stream, int marker_length, int32 *vopPos);\n    PV_STATUS PV_GobHeader(VideoDecData *video);\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    PV_STATUS PV_H263SliceHeader(VideoDecData *videoInt, int *next_MB);\n#endif\n    /*--------------------------------------------------------------------------*/\n    /* defined in motion_comp.c */\n    void MBMotionComp(VideoDecData 
*video, int CBP);\n    void  SkippedMBMotionComp(VideoDecData *video);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in chrominance_pred.c */\n    void chrominance_pred(\n        int xpred,          /* i */\n        int ypred,          /* i */\n        uint8 *cu_prev,     /* i */\n        uint8 *cv_prev,     /* i */\n        uint8 *pred_block,  /* i */\n        int width_uv,       /* i */\n        int height_uv,      /* i */\n        int round1\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in luminance_pred_mode_inter.c */\n    void luminance_pred_mode_inter(\n        int xpred,          /* i */\n        int ypred,          /* i */\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,  /* i */\n        int width,          /* i */\n        int height,         /* i */\n        int round1\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in luminance_pred_mode_inter4v.c */\n    void luminance_pred_mode_inter4v(\n        int xpos,           /* i */\n        int ypos,           /* i */\n        MOT *px,            /* i */\n        MOT *py,            /* i */\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,  /* i */\n        int width,          /* i */\n        int height,         /* i */\n        int round1,         /* i */\n        int mvwidth,            /* i */\n        int *xsum_ptr,          /* i/o */\n        int *ysum_ptr           /* i/o */\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in pp_semaphore_chroma_inter.c */\n#ifdef PV_POSTPROC_ON\n    void pp_semaphore_chroma_inter(\n        int xpred,      /* i */\n        int ypred,      /* i */\n        uint8   *pp_dec_u,  /* i/o */\n        uint8   *pstprcTypPrv,  /* i */\n        int dx,     /* i */\n        int dy,     /* i */\n        int 
mvwidth,    /* i */\n        int height,     /* i */\n        int32   size,       /* i */\n        int mv_loc,     /* i */\n        uint8   msk_deblock /* i */\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in pp_semaphore_luma.c */\n    uint8 pp_semaphore_luma(\n        int xpred,      /* i */\n        int ypred,      /* i */\n        uint8   *pp_dec_y,  /* i/o */\n        uint8   *pstprcTypPrv,  /* i */\n        int *ll,        /* i */\n        int *mv_loc,    /* i/o */\n        int dx,     /* i */\n        int dy,     /* i */\n        int mvwidth,    /* i */\n        int width,      /* i */\n        int height      /* i */\n    );\n#endif\n    /*--------------------------------------------------------------------------*/\n    /* defined in get_pred_adv_mb_add.c */\n    int GetPredAdvancedMB(\n        int xpos,\n        int ypos,\n        uint8 *c_prev,\n        uint8 *pred_block,\n        int width,\n        int rnd1\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in get_pred_adv_b_add.c */\n    int GetPredAdvancedBy0x0(\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,      /* i */\n        int width,      /* i */\n        int pred_width_rnd /* i */\n    );\n\n    int GetPredAdvancedBy0x1(\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,      /* i */\n        int width,      /* i */\n        int pred_width_rnd /* i */\n    );\n\n    int GetPredAdvancedBy1x0(\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,      /* i */\n        int width,      /* i */\n        int pred_width_rnd /* i */\n    );\n\n    int GetPredAdvancedBy1x1(\n        uint8 *c_prev,      /* i */\n        uint8 *pred_block,      /* i */\n        int width,      /* i */\n        int pred_width_rnd /* i */\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in 
get_pred_outside.c */\n    int GetPredOutside(\n        int xpos,\n        int ypos,\n        uint8 *c_prev,\n        uint8 *pred_block,\n        int width,\n        int height,\n        int rnd1,\n        int pred_width\n    );\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in find_pmvsErrRes.c */\n    void mv_prediction(VideoDecData *video, int block, MOT *mvx, MOT *mvy);\n\n    /*--------------------------------------------------------------------------*/\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in mb_utils.c */\n    void Copy_MB_into_Vop(uint8 *comp, int yChan[][NCOEFF_BLOCK], int width);\n    void Copy_B_into_Vop(uint8 *comp, int cChan[], int width);\n    void PutSKIPPED_MB(uint8 *comp, uint8 *c_prev, int width);\n    void PutSKIPPED_B(uint8 *comp, uint8 *c_prev, int width);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in vop.c */\n    PV_STATUS DecodeGOVHeader(BitstreamDecVideo *stream, uint32 *time_base);\n    PV_STATUS DecodeVOLHeader(VideoDecData *video, int layer);\n    PV_STATUS DecodeVOPHeader(VideoDecData *video, Vop *currVop, Bool use_ext_tiemstamp);\n    PV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop);\n    PV_STATUS DecodeH263Header(VideoDecData *video, Vop *currVop);\n    PV_STATUS PV_DecodeVop(VideoDecData *video);\n    uint32 CalcVopDisplayTime(Vol *currVol, Vop *currVop, int shortVideoHeader);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in post_proc.c */\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    void H263_Deblock(uint8 *rec,   int width, int height, int16 *QP_store, uint8 *mode, int chr, int T);\n#endif\n    int  PostProcSemaphore(int16 *q_block);\n    void PostFilter(VideoDecData *video, int filer_type, uint8 *output);\n    void FindMaxMin(uint8 *ptr, int *min, int *max, int incr);\n    void 
DeringAdaptiveSmoothMMX(uint8 *img, int incr, int thres, int mxdf);\n    void AdaptiveSmooth_NoMMX(uint8 *Rec_Y, int v0, int h0, int v_blk, int h_blk,\n                              int thr, int width, int max_diff);\n    void Deringing_Luma(uint8 *Rec_Y, int width, int height, int16 *QP_store,\n                        int Combined, uint8 *pp_mod);\n    void Deringing_Chroma(uint8 *Rec_C, int width, int height, int16 *QP_store,\n                          int Combined, uint8 *pp_mod);\n    void CombinedHorzVertFilter(uint8 *rec, int width, int height, int16 *QP_store,\n                                int chr, uint8 *pp_mod);\n    void CombinedHorzVertFilter_NoSoftDeblocking(uint8 *rec, int width, int height, int16 *QP_store,\n            int chr, uint8 *pp_mod);\n    void CombinedHorzVertRingFilter(uint8 *rec, int width, int height,\n                                    int16 *QP_store, int chr, uint8 *pp_mod);\n\n    /*--------------------------------------------------------------------------*/\n    /* defined in conceal.c */\n    void ConcealTexture_I(VideoDecData *video, int32 startFirstPartition, int mb_start, int mb_stop,\n                          int slice_counter);\n    void ConcealTexture_P(VideoDecData *video, int mb_start, int mb_stop,\n                          int slice_counter);\n    void ConcealPacket(VideoDecData *video, int mb_start, int mb_stop,\n                       int slice_counter);\n    void CopyVopMB(Vop *curr, uint8 *prev, int mbnum, int width, int height);\n\n    /* define in vlc_dequant.c ,  09/18/2000*/\n#ifdef PV_SUPPORT_MAIN_PROFILE\n    int VlcDequantMpegIntraBlock(void *video, int comp, int switched,\n                                 uint8 *bitmapcol, uint8 *bitmaprow);\n    int VlcDequantMpegInterBlock(void *video, int comp,\n                                 uint8 *bitmapcol, uint8 *bitmaprow);\n#endif\n    int VlcDequantH263IntraBlock(VideoDecData *video, int comp, int switched,\n                                 uint8 *bitmapcol, 
uint8 *bitmaprow);\n    int VlcDequantH263IntraBlock_SH(VideoDecData *video, int comp,\n                                    uint8 *bitmapcol, uint8 *bitmaprow);\n    int VlcDequantH263InterBlock(VideoDecData *video, int comp,\n                                 uint8 *bitmapcol, uint8 *bitmaprow);\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus */\n\n/*----------------------------------------------------------------------------\n; END\n----------------------------------------------------------------------------*/\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mp4def.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _PVDECDEF_H_\n#define _PVDECDEF_H_\n\n#include \"mp4dec_api.h\"\n\ntypedef enum\n{\n    PV_SUCCESS,\n    PV_FAIL,\n    PV_MB_STUFFING,         /* hit Macroblock_Stuffing */\n    PV_END_OF_VOP,          /* hit End_of_Video_Object_Plane */\n    PV_END_OF_MB            /* hit End_of_Macroblock */\n#ifdef PV_TOLERATE_VOL_ERRORS\n    , PV_BAD_VOLHEADER\n#endif\n} PV_STATUS;\n\ntypedef uint8 PIXEL;\ntypedef int16 MOT;   /*  : \"int\" type runs faster on RISC machine */\n\n#define TRUE    1\n#define FALSE   0\n\n#define PV_ABS(x)       (((x)<0)? -(x) : (x))\n#define PV_SIGN(x)      (((x)<0)? -1 : 1)\n#define PV_SIGN0(a)     (((a)<0)? -1 : (((a)>0) ? 1 : 0))\n#define PV_MAX(a,b)     ((a)>(b)? (a):(b))\n#define PV_MIN(a,b)     ((a)<(b)? (a):(b))\n#define PV_MEDIAN(A,B,C) ((A) > (B) ? ((A) < (C) ? (A) : (B) > (C) ? (B) : (C)): (B) < (C) ? (B) : (C) > (A) ? (C) : (A))\n/* You don't want to use ((x>UB)?UB:(x<LB)?LB:x) for the clipping */\n/*    because it will use one extra comparison if the compiler is */\n/*    not well-optimized.    04/19/2000.                        
*/\n#define CLIP_THE_RANGE(x,LB,UB) if (x<LB) x = LB; else if (x>UB) x = UB\n\n// Setting up the default values if not already defined by CML2\n\n#define PV_MPEG4  0x0\n#define PV_H263   0x1\n#define PV_FLV1   0x2\n\n#define MODE_INTRA      0x08 //01000\n#define MODE_INTRA_Q    0x09 //01001\n#define MODE_SKIPPED    0x10 //10000\n#define MODE_INTER4V    0x14 //10100\n#define MODE_INTER      0x16 //10110\n#define MODE_INTER_Q    0x17 //10111\n#define MODE_INTER4V_Q  0x15 //10101\n#define INTER_1VMASK    0x2\n#define Q_MASK          0x1\n#define INTRA_MASK      0x8\n#define INTER_MASK      0x4\n\n\n#define I_VOP       0\n#define P_VOP       1\n#define B_VOP       2\n\n#define LUMINANCE_DC_TYPE   1\n#define CHROMINANCE_DC_TYPE 2\n\n#define START_CODE_LENGTH       32\n\n/* 11/30/98 */\n#define NoMarkerFound -1\n#define FoundRM     1   /* Resync Marker */\n#define FoundVSC    2   /* VOP_START_CODE. */\n#define FoundGSC    3   /* GROUP_START_CODE */\n#define FoundEOB    4   /* EOB_CODE */\n\n/* PacketVideo \"absolution timestamp\" object.   
06/13/2000 */\n#define PVTS_START_CODE         0x01C4\n#define PVTS_START_CODE_LENGTH  32\n\n/* session layer and vop layer start codes */\n\n#define VISUAL_OBJECT_SEQUENCE_START_CODE   0x01B0\n#define VISUAL_OBJECT_SEQUENCE_END_CODE     0x01B1\n\n#define VISUAL_OBJECT_START_CODE   0x01B5\n#define VO_START_CODE           0x8\n#define VO_HEADER_LENGTH        32      /* lengtho of VO header: VO_START_CODE +  VO_ID */\n\n#define SOL_START_CODE          0x01BE\n#define SOL_START_CODE_LENGTH   32\n\n#define VOL_START_CODE 0x12\n#define VOL_START_CODE_LENGTH 28\n\n#define VOP_START_CODE 0x1B6\n#define VOP_START_CODE_LENGTH   32\n\n#define GROUP_START_CODE    0x01B3\n#define GROUP_START_CODE_LENGTH  32\n\n#define VOP_ID_CODE_LENGTH      5\n#define VOP_TEMP_REF_CODE_LENGTH    16\n\n#define USER_DATA_START_CODE        0x01B2\n#define USER_DATA_START_CODE_LENGTH 32\n\n#define START_CODE_PREFIX       0x01\n#define START_CODE_PREFIX_LENGTH    24\n\n#define SHORT_VIDEO_START_MARKER         0x20\n#define SHORT_VIDEO_START_MARKER_LENGTH  22\n#define SHORT_VIDEO_END_MARKER            0x3F\n\n#define FLV1_VIDEO_START_MARKER         0x10\n#define FLV1_VIDEO_START_MARKER_LENGTH  21\n\n#define GOB_RESYNC_MARKER         0x01\n#define GOB_RESYNC_MARKER_LENGTH  17\n\n/* motion and resync markers used in error resilient mode  */\n\n#define DC_MARKER                      438273\n#define DC_MARKER_LENGTH                19\n\n#define MOTION_MARKER_COMB             126977\n#define MOTION_MARKER_COMB_LENGTH       17\n\n#define MOTION_MARKER_SEP              81921\n#define MOTION_MARKER_SEP_LENGTH        17\n\n#define RESYNC_MARKER           1\n#define RESYNC_MARKER_LENGTH    17\n\n#define SPRITE_NOT_USED     0\n#define STATIC_SPRITE       1\n#define ONLINE_SPRITE       2\n#define GMC_SPRITE      3\n\n/* macroblock and block size */\n#define MB_SIZE 16\n#define NCOEFF_MB (MB_SIZE*MB_SIZE)\n#define B_SIZE 8\n#define NCOEFF_BLOCK (B_SIZE*B_SIZE)\n#define NCOEFF_Y NCOEFF_MB\n#define NCOEFF_U 
NCOEFF_BLOCK\n#define NCOEFF_V NCOEFF_BLOCK\n#define BLK_PER_MB      4   /* Number of blocks per MB */\n\n/* VLC decoding related definitions */\n#define VLC_ERROR   (-1)\n#define VLC_ESCAPE  7167\n\n\n/* macro utility */\n#define  ZERO_OUT_64BYTES(x)    { *((uint32*)x) = *(((uint32*)(x))+1) =  \\\n        *(((uint32*)(x))+2) = *(((uint32*)(x))+3) =  \\\n        *(((uint32*)(x))+4) = *(((uint32*)(x))+5) =  \\\n        *(((uint32*)(x))+6) = *(((uint32*)(x))+7) =  \\\n        *(((uint32*)(x))+8) = *(((uint32*)(x))+9) =  \\\n        *(((uint32*)(x))+10) = *(((uint32*)(x))+11) =  \\\n        *(((uint32*)(x))+12) = *(((uint32*)(x))+13) =  \\\n        *(((uint32*)(x))+14) = *(((uint32*)(x))+15) =  0; }\n\n\n\n#endif /* _PVDECDEF_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/mp4lib_int.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _MP4LIB_INT_H_\n#define _MP4LIB_INT_H_\n\n#include \"mp4def.h\"\n#include \"mp4dec_api.h\" // extra structure\n\n#undef ENABLE_LOG\n#define BITRATE_AVERAGE_WINDOW 4\n#define FRAMERATE_SCALE ((BITRATE_AVERAGE_WINDOW-1)*10000L)\n#define FAST_IDCT            /* , for fast Variable complexity IDCT */\n//#define PV_DEC_EXTERNAL_IDCT  /*  for separate IDCT (i.e. no direct access to output frame) */\n#define PV_ANNEX_IJKT_SUPPORT\n#define mid_gray 1024\n\ntypedef struct tagBitstream\n{\n    /* function that reteive data from outside the library.   04/11/2000 */\n    /*    In frame-based decoding mode, this shall be NULL.   
08/29/2000 */\n    uint32 curr_word;\n    uint32 next_word;\n    uint8 *bitstreamBuffer; /* pointer to buffer memory */\n    int32  read_point;          /* starting point in the buffer to be read to cache */\n    int  incnt;             /* bit left in cached */\n    int  incnt_next;\n    uint32 bitcnt;          /* total bit read so-far (from inbfr)*/\n    int32  data_end_pos;        /*should be added ,  06/07/2000 */\n    int searched_frame_boundary;\n} BitstreamDecVideo, *LPBitstreamDecVideo;\n\n/* complexity estimation parameters */\ntypedef struct tagComplexity_Est\n{\n    uint8   text_1;             /* texture_complexity_estimation_set_1  */\n    uint8   text_2;             /* texture_complexity_estimation_set_2  */\n    uint8   mc;                 /* motion_compensation_complexity       */\n} Complexity_Est;\n\n\ntypedef struct tagVop\n{\n    PIXEL   *yChan;             /* The Y component */\n    PIXEL   *uChan;             /* The U component */\n    PIXEL   *vChan;             /* The V component */\n\n    uint32  timeStamp;          /* Vop TimeStamp in msec */\n\n    /* Actual syntax elements for VOP (standard) */\n    int     predictionType;     /* VOP prediction type */\n    uint    timeInc;            /* VOP time increment (relative to last mtb) */\n    int     vopCoded;\n    int     roundingType;\n    int     intraDCVlcThr;\n    int16       quantizer;          /* VOP quantizer */\n    int     fcodeForward;       /* VOP dynamic range of motion vectors */\n    int     fcodeBackward;      /* VOP dynamic range of motion vectors */\n    int     refSelectCode;      /* enhancement layer reference select code */\n\n    /* H.263 parameters */\n    int     gobNumber;\n    int     gobFrameID;\n    int     temporalRef;        /* temporal reference, roll over at 256 */\n    int     ETR;\n} Vop;\n\ntypedef struct tagVol\n{\n    int     volID;                  /* VOL identifier (for tracking) */\n    uint    timeIncrementResolution;/* VOL time increment */\n    int     
nbitsTimeIncRes;        /* number of bits for time increment  */\n    uint        timeInc_offset;         /* timeInc offset for multiple VOP in a packet  */\n    uint32  moduloTimeBase;         /* internal decoder clock */\n    int     fixedVopRate;\n    BitstreamDecVideo   *bitstream; /* library bitstream buffer (input buffer) */\n\n    int     complexity_estDisable;  /* VOL disable complexity estimation */\n    int     complexity_estMethod;   /* VOL complexity estimation method */\n    Complexity_Est complexity;      /* complexity estimation flags      */\n\n    /* Error Resilience Flags */\n    int     errorResDisable;        /* VOL disable error resilence mode */\n    /*            (Use Resynch markers) */\n    int     useReverseVLC;          /* VOL reversible VLCs */\n    int     dataPartitioning;       /* VOL data partitioning */\n\n    /* Bit depth  */\n    uint    bitsPerPixel;\n//  int     mid_gray;               /* 2^(bits_per_pixel+2) */\n\n    /* Quantization related parameters */\n    int     quantPrecision;         /* Quantizer precision */\n    uint    quantType;              /* MPEG-4 or H.263 Quantization Type */\n    /* Added loaded quant mat,  05/22/2000 */\n    int     loadIntraQuantMat;      /* Load intra quantization matrix */\n    int     loadNonIntraQuantMat;   /* Load nonintra quantization matrix */\n    int     iqmat[64];              /* Intra quant.matrix */\n    int     niqmat[64];             /* Non-intra quant.matrix */\n\n    /* Parameters used for scalability */\n    int     scalability;            /* VOL scalability (flag) */\n    int     scalType;               /* temporal = 0, spatial = 1, both = 2 */\n\n    int     refVolID;               /* VOL id of reference VOL */\n    int     refSampDir;             /* VOL resol. of ref. VOL */\n    int     horSamp_n;              /* VOL hor. resampling of ref. 
VOL given by */\n    int     horSamp_m;              /*     sampfac = hor_samp_n/hor_samp_m      */\n    int     verSamp_n;              /* VOL ver. resampling of ref. VOL given by */\n    int     verSamp_m;              /*     sampfac = ver_samp_n/ver_samp_m      */\n    int     enhancementType;        /* VOL type of enhancement layer */\n    /* profile and level */\n    int32   profile_level_id;       /* 8-bit profile and level */ //  6/17/04\n\n} Vol;\n\n\ntypedef int16 typeMBStore[6][NCOEFF_BLOCK];\n\ntypedef struct tagMacroBlock\n{\n    typeMBStore         block;              /* blocks */         /*  ACDC */\n    uint8   pred_block[384];        /* prediction block,  Aug 3,2005 */\n    uint8   bitmapcol[6][8];\n    uint8   bitmaprow[6];\n    int     no_coeff[6];\n    int     DCScalarLum;                        /* Luminance DC Scalar */\n    int     DCScalarChr;                        /* Chrominance DC Scalar */\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    int direction;\n#endif\n} MacroBlock;\n\ntypedef struct tagHeaderInfoDecVideo\n{\n    uint8       *Mode;              /* Modes INTRA/INTER/etc. */\n    uint8       *CBP;               /* MCBPC/CBPY stuff */\n} HeaderInfoDecVideo;\n\n\n/************************************************************/\n/*                  VLC structures                          */\n/************************************************************/\ntypedef struct tagTcoef\n{\n    uint last;\n    uint run;\n    int level;\n    uint sign;\n} Tcoef, *LPTcoef;\n\n\n\ntypedef struct tagVLCtab\n{\n    int32 val;\n    int32 len;\n} VLCtab, *LPVLCtab;\n\ntypedef struct tagVLCshorttab\n{\n    int16 val;\n    int16 len;\n} VLCshorttab, *LPVLCshorttab ; /* for space saving, Antoine Nguyen*/\n\ntypedef struct tagVLCtab2\n{\n    uint8 run;\n    uint8 level;\n    uint8 last;\n    uint8 len;\n} VLCtab2, *LPVLCtab2;  /* 10/24/2000 */\n\n/* This type is designed for fast access of DC/AC */\n/*    prediction data.  
If the compiler is smart  */\n/*    enough, it will use shifting for indexing.  */\n/*     04/14/2000.                              */\n\ntypedef int16 typeDCStore[6];   /*  ACDC */\ntypedef int16 typeDCACStore[4][8];\n\n\n\n/* Global structure that can be passed around */\ntypedef struct tagVideoDecData\n{\n    BitstreamDecVideo   *bitstream; /* library bitstream buffer (input buffer) */\n    /* Data For Layers (Scalability) */\n    Vol             **vol;                  /* Data stored for each VOL */\n\n    /* Data used for reconstructing frames */\n    Vop             *currVop;               /* Current VOP (frame)  */\n    Vop             *prevVop;               /* Previous VOP (frame) */\n    /* Data used to facilitate multiple layer decoding.   05/04/2000 */\n    Vop             *prevEnhcVop;           /* New change to rid of memcpy().  04/24/2001 */\n    Vop             **vopHeader;            /* one for each layer.   08/29/2000 */\n\n    /* I/O structures */\n    MacroBlock      *mblock;                    /* Macroblock data structure */\n    uint8           *acPredFlag;                /*  */\n\n    /* scratch memory used in data partitioned mode */\n    typeDCStore     *predDC;        /*  The DC coeffs for each MB */\n    typeDCACStore   *predDCAC_row;\n    typeDCACStore   *predDCAC_col;\n\n    int             usePrevQP;              /* running QP decision switch */\n    uint8           *sliceNo;               /* Slice indicator for each MB  */\n    /*     changed this to a 1D   */\n    /*    array for optimization    */\n    MOT             *motX;                  /* Motion vector in X direction */\n    MOT             *motY;                  /* Motion vector in Y direction */\n    HeaderInfoDecVideo  headerInfo;         /* MB Header information */\n    int16           *QPMB;                  /* Quantizer value for each MB */\n\n    uint8           *pstprcTypCur;          /* Postprocessing type for current frame */\n    uint8           *pstprcTypPrv;      
    /* Postprocessing type for previous frame */\n    /* scratch memory used in all modes */\n    int             mbnum;                      /*  Macroblock number */\n    uint            mbnum_row;\n    int             mbnum_col;\n    /* I added these variables since they are used a lot.   04/13/2000 */\n    int     nMBPerRow, nMBPerCol;   /* number of MBs in each row & column    */\n    int     nTotalMB;\n    /* for short video header */\n    int     nMBinGOB;               /* number of MBs in GOB,  05/22/00 */\n    int     nGOBinVop;              /* number of GOB in Vop   05/22/00 */\n    /* VOL Dimensions */\n    int     width;                  /* Width */\n    int     height;                 /* Height */\n    int     displayWidth;               /* Handle image whose size is not a multiple of 16. */\n    int     displayHeight;              /*   This is the actual size.   08/09/2000        */\n    int32   size;\n    /* Miscellaneous data points to be passed */\n    int             frame_idx;              /* Current frame ID */\n    int             frameRate;              /* Output frame Rate (over 10 seconds) */\n    int32           duration;\n    uint32          currTimestamp;\n    int             currLayer;              /* Current frame layer  */\n    int     shortVideoHeader;       /* shortVideoHeader mode */\n    int     intra_acdcPredDisable;  /* VOL disable INTRA DC prediction */\n    int             numberOfLayers;         /* Number of Layers */\n    /* Frame to be used for concealment     07/07/2001 */\n    uint8           *concealFrame;\n    int             vop_coding_type;\n    /* framerate and bitrate statistics counters.   08/23/2000 */\n    int32           nBitsPerVop[BITRATE_AVERAGE_WINDOW];\n    uint32          prevTimestamp[BITRATE_AVERAGE_WINDOW];\n    int     nBitsForMBID;           /* how many bits required for MB number? */\n    /* total data memory used by the docder library.   
08/23/2000 */\n    int32           memoryUsage;\n\n    /* flag to turn on/off error concealment or soft decoding */\n    int errorConcealment;\n\n    /* Application controls */\n    VideoDecControls    *videoDecControls;\n    int                 postFilterType;     /* Postfilter mode  04/25/00 */\n\n\n\n    PV_STATUS(*vlcDecCoeffIntra)(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra_luma*/);\n    PV_STATUS(*vlcDecCoeffInter)(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    int                 initialized;\n\n    /* Annex IJKT */\n    int     deblocking;\n    int     slice_structure;\n    int     modified_quant;\n    int     advanced_INTRA;\n    int16 QP_CHR;  /* ANNEX_T */\n} VideoDecData;\n\n/* for fast VLC+Dequant  10/12/2000*/\ntypedef int (*VlcDequantBlockFuncP)(void *video, int comp, int switched,\n                                    uint8 *bitmaprow, uint8 *bitmapcol);\n\n//////////////////////////////////////////////////////////////\n//                  Decoder structures                      //\n//////////////////////////////////////////////////////////////\n#endif /* _MP4LIB_INT_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/packet_util.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n\n\n/***********************************************************CommentBegin******\n*       04/13/2000 : initial modification to the new PV-Decoder\n*                            Lib format.\n*       04/16/2001 : Removed PV_END_OF_BUFFER case, error resilience\n***********************************************************CommentEnd********/\nPV_STATUS PV_ReadVideoPacketHeader(VideoDecData *video, int *next_MB)\n{\n    PV_STATUS status;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    BitstreamDecVideo *stream = video->bitstream;\n    int fcode_forward;\n    int resync_marker_length;\n    int nbits = video->nBitsForMBID;\n    uint32 tmpvar32;\n    uint tmpvar16;\n    int16 quantizer;\n    int nTotalMB = video->nTotalMB;\n\n    fcode_forward = currVop->fcodeForward;\n    resync_marker_length = 17;\n\n    if (currVop->predictionType != I_VOP) resync_marker_length = 16 + fcode_forward;\n\n    status = PV_BitstreamShowBitsByteAlign(stream, resync_marker_length, &tmpvar32);\n    /*  if (status != PV_SUCCESS && status != PV_END_OF_BUFFER) return status; */\n    if (tmpvar32 == 
RESYNC_MARKER)\n    {\n//      DecNextStartCode(stream);\n        PV_BitstreamByteAlign(stream);\n        BitstreamReadBits32(stream, resync_marker_length);\n\n        *next_MB = (int) BitstreamReadBits16(stream, nbits);\n//      if (*next_MB <= video->mbnum)   /*  needs more investigation */\n//          *next_MB = video->mbnum+1;\n\n        if (*next_MB >= nTotalMB)  /* fix  04/05/01 */\n        {\n            *next_MB = video->mbnum + 1;\n            if (*next_MB >= nTotalMB)    /* this check is needed  */\n                *next_MB = nTotalMB - 1;\n        }\n        quantizer = (int16) BitstreamReadBits16(stream, currVol->quantPrecision);\n        if (quantizer == 0) return PV_FAIL;        /*  04/03/01 */\n\n        currVop->quantizer = quantizer;\n\n        /* if we have HEC, read some redundant VOP header information */\n        /* this part needs improvement  04/05/01 */\n        if (BitstreamRead1Bits(stream))\n        {\n            int time_base = -1;\n\n            /* modulo_time_base (? 
bits) */\n            do\n            {\n                time_base++;\n                tmpvar16 = BitstreamRead1Bits(stream);\n            }\n            while (tmpvar16 == 1);\n\n            /* marker bit */\n            BitstreamRead1Bits(stream);\n\n            /* vop_time_increment (1-15 bits) */\n            BitstreamReadBits16(stream, currVol->nbitsTimeIncRes);\n\n            /* marker bit */\n            BitstreamRead1Bits(stream);\n\n            /* vop_prediction_type (2 bits) */\n            BitstreamReadBits16(stream, 2);\n\n            /* Added intra_dc_vlc_thr reading  */\n            BitstreamReadBits16(stream, 3);\n\n            /* fcodes */\n            if (currVop->predictionType != I_VOP)\n            {\n                fcode_forward = (int) BitstreamReadBits16(stream, 3);\n\n                if (currVop->predictionType == B_VOP)\n                {\n                    BitstreamReadBits16(stream, 3);\n                }\n            }\n\n        }\n    }\n    else\n    {\n        PV_BitstreamByteAlign(stream);  /*  */\n        status = BitstreamCheckEndBuffer(stream);   /* return end_of_VOP  03/30/01 */\n        if (status != PV_SUCCESS)\n        {\n            return status;\n        }\n        status = BitstreamShowBits32HC(stream, &tmpvar32);   /*  07/07/01 */\n        /* -16 = 0xFFFFFFF0*/\n        if ((tmpvar32 & 0xFFFFFFF0) == VISUAL_OBJECT_SEQUENCE_START_CODE) /* start code mask 00 00 01 */\n\n        {\n            /* we don't have to check for legl stuffing here.   
05/08/2000 */\n            return PV_END_OF_VOP;\n        }\n        else\n        {\n            return PV_FAIL;\n        }\n    }\n\n    return PV_SUCCESS;\n}\n\n\n\n/***********************************************************CommentBegin******\n*       3/10/00  : initial modification to the\n*                new PV-Decoder Lib format.\n*       04/17/01 : remove PV_END_OF_BUFFER, error checking\n***********************************************************CommentEnd********/\nPV_STATUS PV_GobHeader(VideoDecData *video)\n{\n    uint32 tmpvar;\n    Vop *currVop = video->currVop;\n    BitstreamDecVideo *stream = video->bitstream;\n    int quantPrecision = 5;\n    int16 quantizer;\n\n    BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);\n\n    if (tmpvar != GOB_RESYNC_MARKER)\n    {\n        PV_BitstreamShowBitsByteAlign(stream, GOB_RESYNC_MARKER_LENGTH, &tmpvar);\n\n        if (tmpvar != GOB_RESYNC_MARKER)\n        {\n            return PV_FAIL;\n        }\n        else\n            PV_BitstreamByteAlign(stream);  /* if bytealigned GOBHEADER search is performed */\n        /* then no more noforcestuffing  */\n    }\n\n    /* we've got a GOB header info here */\n    BitstreamShowBits32(stream, GOB_RESYNC_MARKER_LENGTH + 5, &tmpvar);\n    tmpvar &= 0x1F;\n\n    if (tmpvar == 0)\n    {\n        return PV_END_OF_VOP;\n    }\n\n    if (tmpvar == 31)\n    {\n        PV_BitstreamFlushBits(stream, GOB_RESYNC_MARKER_LENGTH + 5);\n        BitstreamByteAlignNoForceStuffing(stream);\n        return PV_END_OF_VOP;\n    }\n\n    PV_BitstreamFlushBits(stream, GOB_RESYNC_MARKER_LENGTH + 5);\n    currVop->gobNumber = (int) tmpvar;\n    if (currVop->gobNumber >= video->nGOBinVop) return PV_FAIL;\n    currVop->gobFrameID = (int) BitstreamReadBits16(stream, 2);\n    quantizer = (int16) BitstreamReadBits16(stream, quantPrecision);\n    if (quantizer == 0)   return PV_FAIL;         /*  04/03/01 */\n\n    currVop->quantizer = quantizer;\n    return PV_SUCCESS;\n}\n#ifdef 
PV_ANNEX_IJKT_SUPPORT\nPV_STATUS PV_H263SliceHeader(VideoDecData *video, int *next_MB)\n{\n    PV_STATUS status;\n    uint32 tmpvar;\n    Vop *currVop = video->currVop;\n    BitstreamDecVideo *stream = video->bitstream;\n    int nTotalMB = video->nTotalMB;\n    int16 quantizer;\n\n    PV_BitstreamShowBitsByteAlignNoForceStuffing(stream, 17, &tmpvar);\n    if (tmpvar == RESYNC_MARKER)\n    {\n        BitstreamByteAlignNoForceStuffing(stream);\n        PV_BitstreamFlushBits(stream, 17);\n        if (!BitstreamRead1Bits(stream))\n        {\n            return PV_FAIL;\n        }\n        *next_MB = BitstreamReadBits16(stream, video->nBitsForMBID);\n        if (*next_MB >= nTotalMB)  /* fix  04/05/01 */\n        {\n            *next_MB = video->mbnum + 1;\n            if (*next_MB >= nTotalMB)    /* this check is needed  */\n                *next_MB = nTotalMB - 1;\n        }\n        /* we will not parse sebp2 for large pictures 3GPP */\n        quantizer = (int16) BitstreamReadBits16(stream, 5);\n        if (quantizer == 0) return PV_FAIL;\n\n        currVop->quantizer = quantizer;\n        if (!BitstreamRead1Bits(stream))\n        {\n            return PV_FAIL;\n        }\n        currVop->gobFrameID = (int) BitstreamReadBits16(stream, 2);\n    }\n    else\n    {\n        status = BitstreamCheckEndBuffer(stream);   /* return end_of_VOP  03/30/01 */\n        if (status != PV_SUCCESS)\n        {\n            return status;\n        }\n        PV_BitstreamShowBitsByteAlign(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar);\n\n        if (tmpvar == SHORT_VIDEO_START_MARKER)\n        {\n            /* we don't have to check for legal stuffing here.   05/08/2000 */\n            return PV_END_OF_VOP;\n        }\n        else\n        {\n            return PV_FAIL;\n        }\n    }\n    return PV_SUCCESS;\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/post_filter.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#include    \"mp4dec_lib.h\"\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n#include    \"motion_comp.h\"\n#include \"mbtype_mode.h\"\nconst static int STRENGTH_tab[] = {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12};\n#endif\n\n#ifdef PV_POSTPROC_ON\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nvoid PostFilter(\n    VideoDecData *video,\n    int filter_type,\n    uint8 *output)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    uint8 *pp_mod;\n    int16 *QP_store;\n    int combined_with_deblock_filter;\n    int nTotalMB = video->nTotalMB;\n    int width, height;\n    int32 size;\n    int softDeblocking;\n    uint8 *decodedFrame = video->videoDecControls->outputFrame;\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    
width = video->width;\n    height = video->height;\n    size = (int32)width * height;\n\n    oscl_memcpy(output, decodedFrame, size);\n    oscl_memcpy(output + size, decodedFrame + size, (size >> 2));\n    oscl_memcpy(output + size + (size >> 2), decodedFrame + size + (size >> 2), (size >> 2));\n\n    if (filter_type == 0)\n        return;\n\n    /* The softDecoding cutoff corresponds to ~93000 bps for QCIF 15fps clip  */\n    if (PVGetDecBitrate(video->videoDecControls) > (100*video->frameRate*(size >> 12)))  // MC_sofDeblock\n        softDeblocking = FALSE;\n    else\n        softDeblocking = TRUE;\n\n    combined_with_deblock_filter = filter_type & PV_DEBLOCK;\n    QP_store = video->QPMB;\n\n    /* Luma */\n    pp_mod = video->pstprcTypCur;\n\n    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))\n    {\n        CombinedHorzVertRingFilter(output, width, height, QP_store, 0, pp_mod);\n    }\n    else\n    {\n        if (filter_type & PV_DEBLOCK)\n        {\n            if (softDeblocking)\n            {\n                CombinedHorzVertFilter(output, width, height,\n                                       QP_store, 0, pp_mod);\n            }\n            else\n            {\n                CombinedHorzVertFilter_NoSoftDeblocking(output, width, height,\n                                                        QP_store, 0, pp_mod);\n            }\n        }\n        if (filter_type & PV_DERING)\n        {\n            Deringing_Luma(output, width, height, QP_store,\n                           combined_with_deblock_filter, pp_mod);\n\n        }\n    }\n\n    /* Chroma */\n\n    pp_mod += (nTotalMB << 2);\n    output += size;\n\n    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))\n    {\n        CombinedHorzVertRingFilter(output, (int)(width >> 1), (int)(height >> 1), QP_store, (int) 1, pp_mod);\n    }\n    else\n    {\n        if (filter_type & PV_DEBLOCK)\n        {\n            if (softDeblocking)\n            {\n                
CombinedHorzVertFilter(output, (int)(width >> 1),\n                                       (int)(height >> 1), QP_store, (int) 1, pp_mod);\n            }\n            else\n            {\n                CombinedHorzVertFilter_NoSoftDeblocking(output, (int)(width >> 1),\n                                                        (int)(height >> 1), QP_store, (int) 1, pp_mod);\n            }\n        }\n        if (filter_type & PV_DERING)\n        {\n            Deringing_Chroma(output, (int)(width >> 1),\n                             (int)(height >> 1), QP_store,\n                             combined_with_deblock_filter, pp_mod);\n        }\n    }\n\n    pp_mod += nTotalMB;\n    output += (size >> 2);\n\n    if ((filter_type & PV_DEBLOCK) && (filter_type & PV_DERING))\n    {\n        CombinedHorzVertRingFilter(output, (int)(width >> 1), (int)(height >> 1), QP_store, (int) 1, pp_mod);\n    }\n    else\n    {\n        if (filter_type & PV_DEBLOCK)\n        {\n            if (softDeblocking)\n            {\n                CombinedHorzVertFilter(output, (int)(width >> 1),\n                                       (int)(height >> 1), QP_store, (int) 1, pp_mod);\n            }\n            else\n            {\n                CombinedHorzVertFilter_NoSoftDeblocking(output, (int)(width >> 1),\n                                                        (int)(height >> 1), QP_store, (int) 1, pp_mod);\n            }\n        }\n        if (filter_type & PV_DERING)\n        {\n            Deringing_Chroma(output, (int)(width >> 1),\n                             (int)(height >> 1), QP_store,\n                             combined_with_deblock_filter, pp_mod);\n        }\n    }\n\n    /*  swap current pp_mod to prev_frame pp_mod */\n    pp_mod = video->pstprcTypCur;\n    video->pstprcTypCur = video->pstprcTypPrv;\n    video->pstprcTypPrv = pp_mod;\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    
----------------------------------------------------------------------------*/\n    return;\n}\n#endif\n\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\nvoid H263_Deblock(uint8 *rec,\n                  int width,\n                  int height,\n                  int16 *QP_store,\n                  uint8 *mode,\n                  int chr, int annex_T)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int i, j, k;\n    uint8 *rec_y;\n    int tmpvar;\n    int mbnum, strength, A_D, d1_2, d1, d2, A, B, C, D, b_size;\n    int d, offset, nMBPerRow, nMBPerCol, width2 = (width << 1);\n    /* MAKE SURE I-VOP INTRA MACROBLOCKS ARE SET TO NON-SKIPPED MODE*/\n    mbnum = 0;\n\n    if (chr)\n    {\n        nMBPerRow = width >> 3;\n        nMBPerCol = height >> 3;\n        b_size = 8;\n    }\n    else\n    {\n        nMBPerRow = width >> 4;\n        nMBPerCol = height >> 4;\n        b_size = 16;\n    }\n\n\n    /********************************* VERTICAL FILTERING ****************************/\n    /* vertical filtering of mid sections no need to check neighboring QP's etc */\n    if (!chr)\n    {\n        rec_y = rec + (width << 3);\n        for (i = 0; i < (height >> 4); i++)\n        {\n            for (j = 0; j < (width >> 4); j++)\n            {\n                if (mode[mbnum] != MODE_SKIPPED)\n                {\n                    k = 16;\n                    strength = STRENGTH_tab[QP_store[mbnum]];\n                    while (k--)\n                    {\n                        A =  *(rec_y - width2);\n                        D = *(rec_y + width);\n                        A_D = A - D;\n                        C = *rec_y;\n                        B = *(rec_y - width);\n                        d = (((C - B) << 2) + A_D);\n\n                        if (d < 0)\n                        {\n                            d1 = -(-d >> 
3);\n                            if (d1 < -(strength << 1))\n                            {\n                                d1 = 0;\n                            }\n                            else if (d1 < -strength)\n                            {\n                                d1 = -d1 - (strength << 1);\n                            }\n                            d1_2 = -d1 >> 1;\n                        }\n                        else\n                        {\n                            d1 = d >> 3;\n                            if (d1 > (strength << 1))\n                            {\n                                d1 = 0;\n                            }\n                            else if (d1 > strength)\n                            {\n                                d1 = (strength << 1) - d1;\n                            }\n                            d1_2 = d1 >> 1;\n                        }\n\n                        if (A_D < 0)\n                        {\n                            d2 = -(-A_D >> 2);\n                            if (d2 < -d1_2)\n                            {\n                                d2 = -d1_2;\n                            }\n                        }\n                        else\n                        {\n                            d2 = A_D >> 2;\n                            if (d2 > d1_2)\n                            {\n                                d2 = d1_2;\n                            }\n                        }\n\n                        *(rec_y - width2) = A - d2;\n                        tmpvar = B + d1;\n                        CLIP_RESULT(tmpvar)\n                        *(rec_y - width) = tmpvar;\n                        tmpvar = C - d1;\n                        CLIP_RESULT(tmpvar)\n                        *rec_y = tmpvar;\n                        *(rec_y + width) = D + d2;\n                        rec_y++;\n                    }\n                }\n                else\n                {\n                  
  rec_y += b_size;\n                }\n                mbnum++;\n            }\n            rec_y += (15 * width);\n\n        }\n    }\n\n    /* VERTICAL boundary blocks */\n\n\n    rec_y = rec + width * b_size;\n\n    mbnum = nMBPerRow;\n    for (i = 0; i < nMBPerCol - 1; i++)\n    {\n        for (j = 0; j < nMBPerRow; j++)\n        {\n            if (mode[mbnum] != MODE_SKIPPED || mode[mbnum - nMBPerRow] != MODE_SKIPPED)\n            {\n                k = b_size;\n                if (mode[mbnum] != MODE_SKIPPED)\n                {\n                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum]] : QP_store[mbnum])];\n                }\n                else\n                {\n                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum - nMBPerRow]] : QP_store[mbnum - nMBPerRow])];\n                }\n\n                while (k--)\n                {\n                    A =  *(rec_y - width2);\n                    D =  *(rec_y + width);\n                    A_D = A - D;\n                    C = *rec_y;\n                    B = *(rec_y - width);\n                    d = (((C - B) << 2) + A_D);\n\n                    if (d < 0)\n                    {\n                        d1 = -(-d >> 3);\n                        if (d1 < -(strength << 1))\n                        {\n                            d1 = 0;\n                        }\n                        else if (d1 < -strength)\n                        {\n                            d1 = -d1 - (strength << 1);\n                        }\n                        d1_2 = -d1 >> 1;\n                    }\n                    else\n                    {\n                        d1 = d >> 3;\n                        if (d1 > (strength << 1))\n                        {\n                            d1 = 0;\n                        }\n                        else if (d1 > strength)\n                        {\n                            d1 = (strength << 1) - d1;\n 
                       }\n                        d1_2 = d1 >> 1;\n                    }\n\n                    if (A_D < 0)\n                    {\n                        d2 = -(-A_D >> 2);\n                        if (d2 < -d1_2)\n                        {\n                            d2 = -d1_2;\n                        }\n                    }\n                    else\n                    {\n                        d2 = A_D >> 2;\n                        if (d2 > d1_2)\n                        {\n                            d2 = d1_2;\n                        }\n                    }\n\n                    *(rec_y - width2) = A - d2;\n                    tmpvar = B + d1;\n                    CLIP_RESULT(tmpvar)\n                    *(rec_y - width) = tmpvar;\n                    tmpvar = C - d1;\n                    CLIP_RESULT(tmpvar)\n                    *rec_y = tmpvar;\n                    *(rec_y + width) = D + d2;\n                    rec_y++;\n                }\n            }\n            else\n            {\n                rec_y += b_size;\n            }\n            mbnum++;\n        }\n        rec_y += ((b_size - 1) * width);\n\n    }\n\n\n    /***************************HORIZONTAL FILTERING ********************************************/\n    mbnum = 0;\n    /* HORIZONTAL INNER */\n    if (!chr)\n    {\n        rec_y = rec + 8;\n        offset = width * b_size - b_size;\n\n        for (i = 0; i < nMBPerCol; i++)\n        {\n            for (j = 0; j < nMBPerRow; j++)\n            {\n                if (mode[mbnum] != MODE_SKIPPED)\n                {\n                    k = 16;\n                    strength = STRENGTH_tab[QP_store[mbnum]];\n                    while (k--)\n                    {\n                        A =  *(rec_y - 2);\n                        D =  *(rec_y + 1);\n                        A_D = A - D;\n                        C = *rec_y;\n                        B = *(rec_y - 1);\n                        d = (((C - B) << 2) + 
A_D);\n\n                        if (d < 0)\n                        {\n                            d1 = -(-d >> 3);\n                            if (d1 < -(strength << 1))\n                            {\n                                d1 = 0;\n                            }\n                            else if (d1 < -strength)\n                            {\n                                d1 = -d1 - (strength << 1);\n                            }\n                            d1_2 = -d1 >> 1;\n                        }\n                        else\n                        {\n                            d1 = d >> 3;\n                            if (d1 > (strength << 1))\n                            {\n                                d1 = 0;\n                            }\n                            else if (d1 > strength)\n                            {\n                                d1 = (strength << 1) - d1;\n                            }\n                            d1_2 = d1 >> 1;\n                        }\n\n                        if (A_D < 0)\n                        {\n                            d2 = -(-A_D >> 2);\n                            if (d2 < -d1_2)\n                            {\n                                d2 = -d1_2;\n                            }\n                        }\n                        else\n                        {\n                            d2 = A_D >> 2;\n                            if (d2 > d1_2)\n                            {\n                                d2 = d1_2;\n                            }\n                        }\n\n                        *(rec_y - 2) = A - d2;\n                        tmpvar = B + d1;\n                        CLIP_RESULT(tmpvar)\n                        *(rec_y - 1) = tmpvar;\n                        tmpvar = C - d1;\n                        CLIP_RESULT(tmpvar)\n                        *rec_y = tmpvar;\n                        *(rec_y + 1) = D + d2;\n                        rec_y += 
width;\n                    }\n                    rec_y -= offset;\n                }\n                else\n                {\n                    rec_y += b_size;\n                }\n                mbnum++;\n            }\n            rec_y += (15 * width);\n\n        }\n    }\n\n\n\n    /* HORIZONTAL EDGE */\n    rec_y = rec + b_size;\n    offset = width * b_size - b_size;\n    mbnum = 1;\n    for (i = 0; i < nMBPerCol; i++)\n    {\n        for (j = 0; j < nMBPerRow - 1; j++)\n        {\n            if (mode[mbnum] != MODE_SKIPPED || mode[mbnum-1] != MODE_SKIPPED)\n            {\n                k = b_size;\n                if (mode[mbnum] != MODE_SKIPPED)\n                {\n                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum]] : QP_store[mbnum])];\n                }\n                else\n                {\n                    strength = STRENGTH_tab[(annex_T ?  MQ_chroma_QP_table[QP_store[mbnum - 1]] : QP_store[mbnum - 1])];\n                }\n\n                while (k--)\n                {\n                    A =  *(rec_y - 2);\n                    D =  *(rec_y + 1);\n                    A_D = A - D;\n                    C = *rec_y;\n                    B = *(rec_y - 1);\n                    d = (((C - B) << 2) + A_D);\n\n                    if (d < 0)\n                    {\n                        d1 = -(-d >> 3);\n                        if (d1 < -(strength << 1))\n                        {\n                            d1 = 0;\n                        }\n                        else if (d1 < -strength)\n                        {\n                            d1 = -d1 - (strength << 1);\n                        }\n                        d1_2 = -d1 >> 1;\n                    }\n                    else\n                    {\n                        d1 = d >> 3;\n                        if (d1 > (strength << 1))\n                        {\n                            d1 = 0;\n                        }\n             
           else if (d1 > strength)\n                        {\n                            d1 = (strength << 1) - d1;\n                        }\n                        d1_2 = d1 >> 1;\n                    }\n\n                    if (A_D < 0)\n                    {\n                        d2 = -(-A_D >> 2);\n                        if (d2 < -d1_2)\n                        {\n                            d2 = -d1_2;\n                        }\n                    }\n                    else\n                    {\n                        d2 = A_D >> 2;\n                        if (d2 > d1_2)\n                        {\n                            d2 = d1_2;\n                        }\n                    }\n\n                    *(rec_y - 2) = A - d2;\n                    tmpvar = B + d1;\n                    CLIP_RESULT(tmpvar)\n                    *(rec_y - 1) = tmpvar;\n                    tmpvar = C - d1;\n                    CLIP_RESULT(tmpvar)\n                    *rec_y = tmpvar;\n                    *(rec_y + 1) = D + d2;\n                    rec_y += width;\n                }\n                rec_y -= offset;\n            }\n            else\n            {\n                rec_y += b_size;\n            }\n            mbnum++;\n        }\n        rec_y += ((width * (b_size - 1)) + b_size);\n        mbnum++;\n    }\n\n    return;\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/post_proc.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef post_proc_H\n#define post_proc_H\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n#define UPDATE_PV_MAXPV_MIN(p,max,min) if ((p) > max) max=(p); else if ((p) < min) min = (p);\n\n#define     INDEX(x,thr)    (((x)>=thr)?1:0)\n#define     BLKSIZE     8\n#define     MBSIZE      16\n#define     DERING_THR  16\n\n/* version for fast Deblock filtering*/\n#define     KTh     4  /*threshold for soft filtering*/\n#define     KThH    4  /*threshold for hard filtering */\n\n#define     NoMMX\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables 
used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; SIMPLE TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; ENUMERATED TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; STRUCTURES TYPEDEF'S\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; GLOBAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; END\n----------------------------------------------------------------------------*/\n#endif\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/post_proc_semaphore.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    q_block = pointer to buffer of inverse quantized DCT coefficients of type\n              int for intra-VOP mode or buffer of residual data of type int\n              for inter-VOP mode\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    postmode = post processing semaphore with the vertical deblocking,\n               horizontal deblocking, and deringing bits set up accordingly\n\n Pointers and Buffers Modified:\n    None\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This function sets up the postmode semaphore based on the contents of the\n buffer pointed to by q_block. The function starts out with the assumption\n that all entries of q_block, except for the first entry (q_block[0]), are\n zero. 
This case can induce horizontal and vertical blocking artifacts,\n therefore, both horizontal and vertical deblocking bits are enabled.\n\n The following conditions are tested when setting up the horizontal/vertical\n deblocking and deringing bits:\n 1. When only the elements of the top row of the B_SIZE x B_SIZE block\n    (q_block[n], n = 0,..., B_SIZE-1) are non-zero, vertical blocking artifacts\n    may result, therefore, only the vertical deblocking bit is enabled.\n    Otherwise, the vertical deblocking bit is disabled.\n 2. When only the elements of the far left column of the B_SIZE x B_SIZE block\n    (q_block[n*B_SIZE], n = 0, ..., B_SIZE-1) are non-zero, horizontal blocking\n    artifacts may result, therefore, only the horizontal deblocking bit is\n    enabled. Otherwise, the horizontal deblocking bit is disabled.\n 3. If any non-zero elements exist in positions other than q_block[0],\n    q_block[1], or q_block[B_SIZE], the deringing bit is enabled. Otherwise,\n    it is disabled.\n\n The 3 least significant bits of postmode defines vertical or horizontal\n deblocking and deringing.\n\n The valid values are shown below:\n -------------------------------------------------------\n |           Type                 | Enabled | Disabled |\n -------------------------------------------------------\n | Vertical Deblocking (Bit #0)   |    1    |     0    |\n -------------------------------------------------------\n | Horizontal Deblocking (Bit #1) |    1    |     0    |\n -------------------------------------------------------\n | Deringing (Bit #2)             |    1    |     0    |\n -------------------------------------------------------\n\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_lib.h\"\n#include    \"mp4def.h\"\n#include    
\"post_proc.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef PV_POSTPROC_ON\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\nint PostProcSemaphore(\n    int16 *q_block)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    int i, j;\n\n 
   /* Set default value to vertical and horizontal deblocking enabled */\n    /* Initial assumption is that only q_block[0] element is non-zero, */\n    /* therefore, vertical and horizontal deblocking bits are set to 1 */\n    int postmode = 0x3;\n\n    /*----------------------------------------------------------------------------\n    ; Function body here\n    ----------------------------------------------------------------------------*/\n    /* Vertical deblocking bit is enabled when only the entire top row of   */\n    /* the B_SIZE x B_SIZE block, i.e., q_block[n], n = 0,..., B_SIZE-1,    */\n    /* are non-zero. Since initial assumption is that all elements, except  */\n    /* q_block[0], is zero, we need to check the remaining elements in the  */\n    /* top row to  determine if all or some are non-zero.                   */\n    if (q_block[1] != 0)\n    {\n        /* At this point, q_block[0] and q_block[1] are non-zero, while */\n        /* q_block[n], n = 2,..., B_SIZE-1, are zero. Therefore, we     */\n        /* need to disable vertical deblocking                          */\n        postmode &= 0xE;\n    }\n\n    for (i = 2; i < B_SIZE; i++)\n    {\n        if (q_block[i])\n        {\n            /* Check if q_block[n], n = 2,..., B_SIZE-1, are non-zero.*/\n            /* If any of them turn out to be non-zero, we need to     */\n            /* disable vertical deblocking.                           */\n            postmode &= 0xE;\n\n            /* Deringing is enabled if any nonzero elements exist in */\n            /* positions other than q_block[0], q_block[1] or        */\n            /* q_block[B_SIZE].                                      */\n            postmode |= 0x4;\n\n            break;\n        }\n    }\n\n    /* Horizontal deblocking bit is enabled when only the entire far */\n    /* left column, i.e., q_block[n*B_SIZE], n = 0, ..., B_SIZE-1,   */\n    /* are non-zero. 
Since initial assumption is that all elements,  */\n    /* except q_block[0], is zero, we need to check the remaining    */\n    /* elements in the far left column to determine if all or some   */\n    /* are non-zero.                                                 */\n    if (q_block[B_SIZE])\n    {\n        /* At this point, only q_block[0] and q_block[B_SIZE] are non-zero, */\n        /* while q_block[n*B_SIZE], n = 2, 3,..., B_SIZE-1, are zero.       */\n        /* Therefore, we need to disable horizontal deblocking.             */\n        postmode &= 0xD;\n    }\n\n    for (i = 16; i < NCOEFF_BLOCK; i += B_SIZE)\n    {\n        if (q_block[i])\n        {\n            /* Check if q_block[n], n = 2*B_SIZE,...,(B_SIZE-1)*B_SIZE,  */\n            /* are non-zero. If any of them turn out to be non-zero,     */\n            /* we need to disable horizontal deblocking.                 */\n            postmode &= 0xD;\n\n            /* Deringing is enabled if any nonzero elements exist in */\n            /* positions other than q_block[0], q_block[1] or        */\n            /* q_block[B_SIZE].                                      */\n            postmode |= 0x4;\n\n            break;\n        }\n    }\n\n    /* At this point, only the first row and far left column elements */\n    /* have been tested. If deringing bit is still not set at this    */\n    /* point, check the rest of q_block to determine if the elements  */\n    /* are non-zero. 
If all elements, besides q_block[0], q_block[1], */\n    /* or q_block[B_SIZE] are non-zero, deringing bit must be set     */\n    if ((postmode & 0x4) == 0)\n    {\n        for (i = 1; i < B_SIZE; i++)\n        {\n            for (j = 1; j < B_SIZE; j++)\n            {\n                if (q_block[(i<<3)+j])\n                {\n                    /* At this point, q_block[0] and another q_block */\n                    /* element are non-zero, therefore, we need to   */\n                    /* disable vertical and horizontal deblocking    */\n                    postmode &= 0xC;\n\n                    /* Deringing is enabled if any nonzero elements exist in */\n                    /* positions other than q_block[0], q_block[1] or        */\n                    /* q_block[B_SIZE].                                      */\n                    postmode |= 0x4;\n\n                    /* Set outer FOR loop count to B_SIZE to get out of */\n                    /* outer FOR loop                                   */\n                    i = B_SIZE;\n\n                    /* Get out of inner FOR loop */\n                    break;\n                }\n            }\n        }\n    }\n\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return (postmode);\n}\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/pp_semaphore_chroma_inter.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    xpred = x-axis coordinate of the block used for prediction (int)\n    ypred = y-axis coordinate of the block used for prediction (int)\n    pp_dec_u = pointer to the post processing semaphore for chrominance\n               (uint8)\n    pstprcTypPrv = pointer the previous frame's post processing type\n                   (uint8)\n    dx = horizontal component of the motion vector (int)\n    dy = vertical component of the motion vector (int)\n    mvwidth = number of blocks per row in the luminance VOP (int)\n    height = luminance VOP height in pixels (int)\n    size = total number of pixel in the current luminance VOP (int)\n    mv_loc = flag indicating location of the motion compensated\n         (x,y) position with respect to the luminance MB (int);\n         0 -> inside MB, 1 -> outside MB\n    msk_deblock = flag indicating whether to perform deblocking\n              (msk_deblock = 0) or not (msk_deblock = 1) (uint8)\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    None\n\n Pointers and 
Buffers Modified:\n    pp_dec_u contents are the updated semaphore propagation data\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This functions performs post processing semaphore propagation processing\n after chrominance prediction in interframe processing mode.\n\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_api.h\"\n#include    \"mp4def.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER 
REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef PV_POSTPROC_ON\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /*----------------------------------------------------------------------------\n    ; FUNCTION CODE\n    ----------------------------------------------------------------------------*/\n    void pp_semaphore_chroma_inter(\n        int xpred,      /* i */\n        int ypred,      /* i */\n        uint8   *pp_dec_u,  /* i/o */\n        uint8   *pstprcTypPrv,  /* i */\n        int dx,     /* i */\n        int dy,     /* i */\n        int mvwidth,    /* i */\n        int height,     /* i */\n        int32   size,       /* i */\n        int mv_loc,     /* i */\n        uint8   msk_deblock /* i */\n    )\n    {\n        /*----------------------------------------------------------------------------\n        ; Define all local variables\n        ----------------------------------------------------------------------------*/\n        int mmvy, mmvx, nmvy, nmvx;\n        uint8 *pp_prev1, *pp_prev2, *pp_prev3, *pp_prev4;\n\n        /*----------------------------------------------------------------------------\n        ; Function body here\n        ----------------------------------------------------------------------------*/\n\n        /* 09/28/2000, modify semaphore propagation to */\n        /* accommodate smart indexing */\n        mmvx = xpred >> 4;  /* block x coor */\n        nmvx = mmvx;\n\n        mmvy = ypred >> 4;  /* block y coor */\n        nmvy = mmvy;\n\n        /* Check if MV is outside the frame */\n        if (mv_loc == 1)\n        {\n            /* Perform boundary check */\n            if (nmvx < 0)\n            {\n                nmvx = 0;\n            }\n            else if (nmvx > mvwidth - 1)\n            {\n                nmvx = mvwidth - 1;\n            }\n\n            if (nmvy < 0)\n            {\n                nmvy = 0;\n    
        }\n            else if (nmvy > (height >> 4) - 1)\n            {\n                nmvy = (height >> 4) - 1;\n            }\n        }\n\n        /* Calculate pointer to first chrominance b semaphores in       */\n        /* pstprcTypPrv, i.e., first chrominance b semaphore is in      */\n        /* (pstprcTypPrv + (size>>6)).                  */\n        /* Since total number of chrominance blocks per row in a VOP    */\n        /* is half of the total number of luminance blocks per row in a */\n        /* VOP, we use (mvwidth >> 1) when calculating the row offset.  */\n        pp_prev1 = pstprcTypPrv + (size >> 6) + nmvx + nmvy * (mvwidth >> 1) ;\n\n        /* Check if MV is a multiple of 16 */\n        /*  1/5/01, make sure it doesn't go out of bound */\n        if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 4) - 1))\n        {   /* dy is not a multiple of 16 */\n\n            /* pp_prev3 is the block below pp_prev1 block */\n            pp_prev3 = pp_prev1 + (mvwidth >> 1);\n        }\n        else\n        {   /* dy is a multiple of 16 */\n            pp_prev3 = pp_prev1;\n        }\n\n        /*  1/5/01, make sure it doesn't go out of bound */\n        if (((dx&0xF) != 0) && (mmvx + 1 < (mvwidth >> 1) - 1))\n        {   /* dx is not a multiple of 16 */\n\n            /* pp_prev2 is the block to the right of pp_prev1 block */\n            pp_prev2 = pp_prev1 + 1;\n\n            /* pp_prev4 is the block to the right of the block */\n            /* below pp_prev1 block                */\n            pp_prev4 = pp_prev3 + 1;\n        }\n        else\n        {   /* dx is a multiple of 16 */\n\n            pp_prev2 = pp_prev1;\n            pp_prev4 = pp_prev3;\n        }\n\n        /* Advance offset to location of first Chrominance R semaphore in */\n        /* pstprcTypPrv. 
Since the number of pixels in a Chrominance VOP  */\n        /* is (number of pixels in Luminance VOP/4), and there are 64     */\n        /* pixels in an 8x8 Chrominance block, the offset can be      */\n        /* calculated as:                         */\n        /*  mv_loc = (number of pixels in Luminance VOP/(4*64))   */\n        /*         = size/256 = size>>8               */\n        mv_loc = (size >> 8);\n\n        /*  11/3/00, change the propagation for deblocking */\n        if (msk_deblock == 0)\n        {\n\n            /* Deblocking semaphore propagation for Chrominance */\n            /* b semaphores                     */\n            *(pp_dec_u) = 0;\n\n            /* Advance offset to point to Chrominance r semaphores */\n            pp_dec_u += mv_loc;\n\n            /* Deblocking semaphore propagation for Chrominance */\n            /* r semaphores                     */\n            *(pp_dec_u) = 0;\n        }\n        else\n        {\n            /* Deringing semaphore propagation for Chrominance B block */\n            if ((*(pp_dec_u)&4) == 0)\n            {\n                *(pp_dec_u) |= ((*(pp_prev1) | *(pp_prev2) |\n                                 *(pp_prev3) | *(pp_prev4)) & 0x4);\n            }\n\n            /* Advance offset to point to Chrominance r semaphores */\n            pp_dec_u += mv_loc;\n            pp_prev1 += mv_loc;\n            pp_prev2 += mv_loc;\n            pp_prev3 += mv_loc;\n            pp_prev4 += mv_loc;\n\n            /* Deringing semaphore propagation for Chrominance R */\n            if ((*(pp_dec_u)&4) == 0)\n            {\n                *(pp_dec_u) |= ((*(pp_prev1) | *(pp_prev2) |\n                                 *(pp_prev3) | *(pp_prev4)) & 0x4);\n            }\n        }\n\n        /*----------------------------------------------------------------------------\n        ; Return nothing or data or data pointer\n        ----------------------------------------------------------------------------*/\n      
  return;\n    }\n#ifdef __cplusplus\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/pp_semaphore_luma.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n------------------------------------------------------------------------------\n INPUT AND OUTPUT DEFINITIONS\n\n Inputs:\n    xpred = x-axis coordinate of the MB used for prediction (int)\n    ypred = y-axis coordinate of the MB used for prediction (int)\n    pp_dec_y = pointer to the post processing semaphore for current\n           luminance frame (uint8)\n    pstprcTypPrv = pointer the previous frame's post processing type\n                   (uint8)\n    ll = pointer to the buffer (int)\n    mv_loc = flag indicating location of the motion compensated\n         (x,y) position with respect to the luminance MB (int);\n         0 -> inside MB, 1 -> outside MB\n    dx = horizontal component of the motion vector (int)\n    dy = vertical component of the motion vector (int)\n    mvwidth = number of blocks per row (int)\n    width = luminance VOP width in pixels (int)\n    height = luminance VOP height in pixels (int)\n\n Local Stores/Buffers/Pointers Needed:\n    None\n\n Global Stores/Buffers/Pointers Needed:\n    None\n\n Outputs:\n    msk_deblock = flag that indicates whether deblocking is to be\n              performed (msk_deblock = 0) or not (msk_deblock =\n              1) (uint8)\n\n 
Pointers and Buffers Modified:\n    pp_dec_y contents are the updated semapohore propagation data\n\n Local Stores Modified:\n    None\n\n Global Stores Modified:\n    None\n\n------------------------------------------------------------------------------\n FUNCTION DESCRIPTION\n\n This functions performs post processing semaphore propagation processing\n after luminance prediction.\n\n*/\n\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include    \"mp4dec_api.h\"\n#include    \"mp4def.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; 
Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef PV_POSTPROC_ON\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /*----------------------------------------------------------------------------\n    ; FUNCTION CODE\n    ----------------------------------------------------------------------------*/\n    uint8 pp_semaphore_luma(\n        int xpred,      /* i */\n        int ypred,      /* i */\n        uint8   *pp_dec_y,  /* i/o */\n        uint8   *pstprcTypPrv,  /* i */\n        int *ll,        /* i */\n        int *mv_loc,    /* i/o */\n        int dx,     /* i */\n        int dy,     /* i */\n        int mvwidth,    /* i */\n        int width,      /* i */\n        int height      /* i */\n    )\n    {\n        /*----------------------------------------------------------------------------\n        ; Define all local variables\n        ----------------------------------------------------------------------------*/\n        int kk, mmvy, mmvx, nmvx, nmvy;\n        uint8   *pp_prev1, *pp_prev2, *pp_prev3, *pp_prev4;\n        uint8   msk_deblock = 0;        /*  11/3/00 */\n\n        /*----------------------------------------------------------------------------\n        ; Function body here\n        ----------------------------------------------------------------------------*/\n        /* Interframe Processing - 1 MV per MB */\n\n        /* check whether the MV points outside the frame */\n        if (xpred >= 0 && xpred <= ((width << 1) - (2*MB_SIZE)) && ypred >= 0 &&\n                ypred <= ((height << 1) - (2*MB_SIZE)))\n        {   /*****************************/\n            /* (x,y) is inside the frame */\n            /*****************************/\n\n            /*  10/24/2000 post_processing semaphore */\n            /* generation */\n\n            /*  10/23/2000 no boundary checking*/\n            *mv_loc = 0;\n\n            /* Calculate block x coordinate. 
Divide by 16 is for  */\n            /* converting half-pixel resolution to block          */\n            mmvx = xpred >> 4;\n\n            /* Calculate block y coordinate. Divide by 16 is for */\n            /* converting half-pixel resolution to block         */\n            mmvy = ypred >> 4;\n\n            /* Find post processing semaphore location for block */\n            /* used for prediction, i.e.,                */\n            /* pp_prev1 = &pstprcTypPrv[mmvy*mvwidth][mmvx]      */\n            pp_prev1 = pstprcTypPrv + mmvx + mmvy * mvwidth;\n\n            /* Check if MV is a multiple of 16 */\n            if ((dx&0xF) != 0)\n            {   /* dx is not a multiple of 16 */\n\n                /* pp_prev2 is the block to the right of */\n                /* pp_prev1 block            */\n                pp_prev2 = pp_prev1 + 1;\n\n                if ((dy&0xF) != 0)\n                {   /* dy is not a multiple of 16 */\n\n                    /* pp_prev3 is the block below */\n                    /* pp_prev1 block          */\n                    pp_prev3 = pp_prev1 + mvwidth;\n                }\n                else\n                {   /* dy is a multiple of 16 */\n\n                    pp_prev3 = pp_prev1;\n                }\n\n                /* pp_prev4 is the block to the right of */\n                /* pp_prev3 block.           */\n                pp_prev4 = pp_prev3 + 1;\n            }\n            else\n            {   /* dx is a multiple of 16 */\n\n                pp_prev2 = pp_prev1;\n\n                if ((dy&0xF) != 0)\n                {   /* dy is not a multiple of 16 */\n\n                    /* pp_prev3 is the block below */\n                    /* pp_prev1 block.         
*/\n                    pp_prev3 = pp_prev1 + mvwidth;\n                }\n                else\n                {   /* dy is a multiple of 16 */\n\n                    pp_prev3 = pp_prev1;\n                    msk_deblock = 0x3;\n                }\n\n                pp_prev4 = pp_prev3;\n            }\n\n            /* Perform post processing semaphore propagation for each */\n            /* of the 4 blocks in a MB.               */\n            for (kk = 0; kk < 4; kk++)\n            {\n                /* Deringing semaphore propagation */\n                if ((*(pp_dec_y) & 4) == 0)\n                {\n                    *(pp_dec_y) |= ((*(pp_prev1) | *(pp_prev2) |\n                                     *(pp_prev3) | *(pp_prev4)) & 0x4);\n                }\n                /* Deblocking semaphore propagation */\n                /*  11/3/00, change the propagation for deblocking */\n                if (msk_deblock == 0)\n                {\n                    *(pp_dec_y) = 0;\n                }\n\n                pp_dec_y += ll[kk];\n                pp_prev1 += ll[kk];\n                pp_prev2 += ll[kk];\n                pp_prev3 += ll[kk];\n                pp_prev4 += ll[kk];\n            }\n\n        }\n        else\n        {   /******************************/\n            /* (x,y) is outside the frame */\n            /******************************/\n\n            /*  10/24/2000 post_processing semaphore */\n            /* generation */\n\n            /*  10/23/2000 boundary checking*/\n            *mv_loc = 1;\n\n            /* Perform post processing semaphore propagation for each */\n            /* of the 4 blocks in a MB.               */\n            for (kk = 0; kk < 4; kk++)\n            {\n                /* Calculate block x coordinate and round (?).  */\n                /* Divide by 16 is for converting half-pixel    */\n                /* resolution to block.             
*/\n                mmvx = (xpred + ((kk & 1) << 3)) >> 4;\n                nmvx = mmvx;\n\n                /* Calculate block y coordinate and round (?).  */\n                /* Divide by 16 is for converting half-pixel    */\n                /* resolution to block.             */\n                mmvy = (ypred + ((kk & 2) << 2)) >> 4;\n                nmvy = mmvy;\n\n                /* Perform boundary checking */\n                if (nmvx < 0)\n                {\n                    nmvx = 0;\n                }\n                else if (nmvx > mvwidth - 1)\n                {\n                    nmvx = mvwidth - 1;\n                }\n\n                if (nmvy < 0)\n                {\n                    nmvy = 0;\n                }\n                else if (nmvy > (height >> 3) - 1)\n                {\n                    nmvy = (height >> 3) - 1;\n                }\n\n                /* Find post processing semaphore location for block */\n                /* used for prediction, i.e.,                */\n                /* pp_prev1 = &pstprcTypPrv[nmvy*mvwidth][nmvx]      */\n                pp_prev1 = pstprcTypPrv + nmvx + nmvy * mvwidth;\n\n                /* Check if x component of MV is a multiple of 16    */\n                /* and check if block x coordinate is out of bounds  */\n                if (((dx&0xF) != 0) && (mmvx + 1 < mvwidth - 1))\n                {   /* dx is not a multiple of 16 and the block */\n                    /* x coordinate is within the bounds        */\n\n                    /* pp_prev2 is the block to the right of */\n                    /* pp_prev1 block            */\n                    pp_prev2 = pp_prev1 + 1;\n\n                    /* Check if y component of MV is a multiple */\n                    /* of 16 and check if block y coordinate is */\n                    /* out of bounds                */\n                    if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 3) - 1))\n                    {   /* dy is not a multiple 
of 16 and */\n                        /* the block y coordinate is      */\n                        /* within the bounds              */\n\n                        /* pp_prev3 is the block below */\n                        /* pp_prev1 block          */\n                        pp_prev3 = pp_prev1 + mvwidth;\n\n                        /* all prediction are from different blocks */\n                        msk_deblock = 0x3;\n                    }\n                    else\n                    {   /* dy is a multiple of 16 or the block */\n                        /* y coordinate is out of bounds       */\n\n                        pp_prev3 = pp_prev1;\n                    }\n\n                    /* pp_prev4 is the block to the right of */\n                    /* pp_prev3 block.           */\n                    pp_prev4 = pp_prev3 + 1;\n                }\n                else\n                {   /* dx is a multiple of 16 or the block x */\n                    /* coordinate is out of bounds           */\n\n                    pp_prev2 = pp_prev1;\n\n                    /* Check if y component of MV is a multiple */\n                    /* of 16 and check if block y coordinate is */\n                    /* out of bounds                */\n                    if (((dy&0xF) != 0) && (mmvy + 1 < (height >> 3) - 1))\n                    {   /* dy is not a multiple of 16 and */\n                        /* the block y coordinate is      */\n                        /* within the bounds              */\n\n                        /* pp_prev3 is the block below */\n                        /* pp_prev1 block.         
*/\n                        pp_prev3 = pp_prev1 + mvwidth;\n                    }\n                    else\n                    {   /* dy is a multiple of 16 or the block */\n                        /* y coordinate is out of bounds       */\n\n                        pp_prev3 = pp_prev1;\n                    }\n\n                    pp_prev4 = pp_prev3;\n                }\n\n                /* Deringing semaphore propagation */\n                if ((*(pp_dec_y)&4) == 0)\n                {\n                    *(pp_dec_y) |= ((*(pp_prev1) |\n                                     *(pp_prev2) | *(pp_prev3) |\n                                     *(pp_prev4)) & 0x4);\n                }\n                /* Deblocking semaphore propagation */\n                /*  11/3/00, change the propaga= */\n                /* tion for deblocking */\n                if (msk_deblock == 0)\n                {\n                    *(pp_dec_y) = 0;\n                }\n\n                pp_dec_y += ll[kk];\n            }\n        }\n\n        /*----------------------------------------------------------------------------\n        ; Return nothing or data or data pointer\n        ----------------------------------------------------------------------------*/\n        return (msk_deblock);\n    }\n#ifdef __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/pvdec_api.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n\n#define OSCL_DISABLE_WARNING_CONDITIONAL_IS_CONSTANT\n#include \"osclconfig_compiler_warnings.h\"\n\n#ifdef DEC_INTERNAL_MEMORY_OPT\n#define QCIF_MBS 99\n#define QCIF_BS (4*QCIF_MBS)\n#define QCIF_MB_ROWS 11\nextern uint8                IMEM_sliceNo[QCIF_MBS];\nextern uint8                IMEM_acPredFlag[QCIF_MBS];\nextern uint8                IMEM_headerInfo_Mode[QCIF_MBS];\nextern uint8                IMEM_headerInfo_CBP[QCIF_MBS];\nextern int                  IMEM_headerInfo_QPMB[QCIF_MBS];\nextern MacroBlock           IMEM_mblock;\nextern MOT                  IMEM_motX[QCIF_BS];\nextern MOT                  IMEM_motY[QCIF_BS];\nextern BitstreamDecVideo    IMEM_BitstreamDecVideo[4];\nextern typeDCStore          IMEM_predDC[QCIF_MBS];\nextern typeDCACStore        IMEM_predDCAC_col[QCIF_MB_ROWS+1];\n\nextern VideoDecData         IMEM_VideoDecData[1];\nextern Vop                  IMEM_currVop[1];\nextern Vop                  IMEM_prevVop[1];\nextern PIXEL                IMEM_currVop_yChan[QCIF_MBS*128*3];\nextern PIXEL                IMEM_prevVop_yChan[QCIF_MBS*128*3];\nextern uint8                
IMEM_pstprcTypCur[6*QCIF_MBS];\nextern uint8                IMEM_pstprcTypPrv[6*QCIF_MBS];\n\n\nextern Vop                  IMEM_vopHEADER[2];\nextern Vol                  IMEM_VOL[2];\nextern Vop                  IMEM_vopHeader[2][1];\nextern Vol                  IMEM_vol[2][1];\n\n#endif\n\n/* ======================================================================== */\n/*  Function : PVInitVideoDecoder()                                         */\n/*  Date     : 04/11/2000, 08/29/2000                                       */\n/*  Purpose  : Initialization of the MPEG-4 video decoder library.          */\n/*             The return type is Bool instead of PV_STATUS because         */\n/*             we don't want to expose PV_STATUS to (outside) programmers   */\n/*             that use our decoder library SDK.                            */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVInitVideoDecoder(VideoDecControls *decCtrl, uint8 *volbuf[],\n                                        int32 *volbuf_size, int nLayers, int width, int height, MP4DecodingMode mode)\n{\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    Bool status = PV_TRUE;\n    int idx;\n    BitstreamDecVideo *stream;\n\n\n    oscl_memset(decCtrl, 0, sizeof(VideoDecControls)); /* fix a size bug.   
03/28/2001 */\n    decCtrl->nLayers = nLayers;\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        decCtrl->volbuf[idx] = volbuf[idx];\n        decCtrl->volbuf_size[idx] = volbuf_size[idx];\n    }\n\n    /* memory allocation & initialization */\n#ifdef DEC_INTERNAL_MEMORY_OPT\n    video = IMEM_VideoDecData;\n#else\n    video = (VideoDecData *) oscl_malloc(sizeof(VideoDecData));\n#endif\n    if (video != NULL)\n    {\n        oscl_memset(video, 0, sizeof(VideoDecData));\n        video->memoryUsage = sizeof(VideoDecData);\n        video->numberOfLayers = nLayers;\n#ifdef DEC_INTERNAL_MEMORY_OPT\n        video->vol = (Vol **) IMEM_VOL;\n#else\n        video->vol = (Vol **) oscl_malloc(nLayers * sizeof(Vol *));\n#endif\n        if (video->vol == NULL) status = PV_FALSE;\n        video->memoryUsage += nLayers * sizeof(Vol *);\n\n\n        /* we need to setup this pointer for the application to */\n        /*    pass it around.                                   */\n        decCtrl->videoDecoderData = (void *) video;\n        video->videoDecControls = decCtrl;  /* yes. 
we have a cyclic */\n        /* references here :)    */\n\n        /* Allocating Vop space, this has to change when we add */\n        /*    spatial scalability to the decoder                */\n#ifdef DEC_INTERNAL_MEMORY_OPT\n        video->currVop = IMEM_currVop;\n        if (video->currVop == NULL) status = PV_FALSE;\n        else oscl_memset(video->currVop, 0, sizeof(Vop));\n        video->prevVop = IMEM_prevVop;\n        if (video->prevVop == NULL) status = PV_FALSE;\n        else oscl_memset(video->prevVop, 0, sizeof(Vop));\n        video->memoryUsage += (sizeof(Vop) * 2);\n        video->vopHeader = (Vop **) IMEM_vopHEADER;\n#else\n\n        video->currVop = (Vop *) oscl_malloc(sizeof(Vop));\n        if (video->currVop == NULL) status = PV_FALSE;\n        else oscl_memset(video->currVop, 0, sizeof(Vop));\n        video->prevVop = (Vop *) oscl_malloc(sizeof(Vop));\n        if (video->prevVop == NULL) status = PV_FALSE;\n        else oscl_memset(video->prevVop, 0, sizeof(Vop));\n        video->memoryUsage += (sizeof(Vop) * 2);\n\n        video->vopHeader = (Vop **) oscl_malloc(sizeof(Vop *) * nLayers);\n#endif\n        if (video->vopHeader == NULL) status = PV_FALSE;\n        else oscl_memset(video->vopHeader, 0, sizeof(Vop *)*nLayers);\n        video->memoryUsage += (sizeof(Vop *) * nLayers);\n\n        video->initialized = PV_FALSE;\n        /* Decode the header to get all information to allocate data */\n        if (status == PV_TRUE)\n        {\n            /* initialize decoded frame counter.   
04/24/2001 */\n            video->frame_idx = -1;\n\n\n            for (idx = 0; idx < nLayers; idx++)\n            {\n\n#ifdef DEC_INTERNAL_MEMORY_OPT\n                video->vopHeader[idx] = IMEM_vopHeader[idx];\n#else\n                video->vopHeader[idx] = (Vop *) oscl_malloc(sizeof(Vop));\n#endif\n                if (video->vopHeader[idx] == NULL)\n                {\n                    status = PV_FALSE;\n                    break;\n                }\n                else\n                {\n                    oscl_memset(video->vopHeader[idx], 0, sizeof(Vop));\n                    video->vopHeader[idx]->timeStamp = 0;\n                    video->memoryUsage += (sizeof(Vop));\n                }\n#ifdef DEC_INTERNAL_MEMORY_OPT\n                video->vol[idx] = IMEM_vol[idx];\n                video->memoryUsage += sizeof(Vol);\n                oscl_memset(video->vol[idx], 0, sizeof(Vol));\n                if (video->vol[idx] == NULL) status = PV_FALSE;\n                stream = IMEM_BitstreamDecVideo;\n#else\n                video->vol[idx] = (Vol *) oscl_malloc(sizeof(Vol));\n                if (video->vol[idx] == NULL)\n                {\n                    status = PV_FALSE;\n                    break;\n                }\n                else\n                {\n                    video->memoryUsage += sizeof(Vol);\n                    oscl_memset(video->vol[idx], 0, sizeof(Vol));\n                }\n\n                stream = (BitstreamDecVideo *) oscl_malloc(sizeof(BitstreamDecVideo));\n#endif\n                video->memoryUsage += sizeof(BitstreamDecVideo);\n                if (stream == NULL)\n                {\n                    status = PV_FALSE;\n                    break;\n                }\n                else\n                {\n                    int32 buffer_size;\n                    if ((buffer_size = BitstreamOpen(stream, idx)) < 0)\n                    {\n                        mp4dec_log(\"InitVideoDecoder(): Can't allocate 
bitstream buffer.\\n\");\n                        status = PV_FALSE;\n                        break;\n                    }\n                    video->memoryUsage += buffer_size;\n                    video->vol[idx]->bitstream = stream;\n                    video->vol[idx]->volID = idx;\n                    video->vol[idx]->timeInc_offset = 0;  /*  11/12/01 */\n                    video->vlcDecCoeffIntra = &VlcDecTCOEFShortHeader;\n                    video->vlcDecCoeffInter = &VlcDecTCOEFShortHeader;\n                    if (mode == MPEG4_MODE)\n                    {\n                        /* Set up VOL header bitstream for frame-based decoding.  08/30/2000 */\n                        BitstreamReset(stream, decCtrl->volbuf[idx], decCtrl->volbuf_size[idx]);\n\n                        switch (DecodeVOLHeader(video, idx))\n                        {\n                            case PV_SUCCESS :\n                                if (status == PV_TRUE)\n                                    status = PV_TRUE;   /*  we want to make sure that if first layer is bad, second layer is good return PV_FAIL */\n                                else\n                                    status = PV_FALSE;\n                                break;\n#ifdef PV_TOLERATE_VOL_ERRORS\n                            case PV_BAD_VOLHEADER:\n                                status = PV_TRUE;\n                                break;\n#endif\n                            default :\n                                status = PV_FALSE;\n                                break;\n                        }\n\n                    }\n                    else\n                    {\n                        video->shortVideoHeader = PV_H263;\n                    }\n\n                    if (video->shortVideoHeader)\n                    {\n                        if (mode != FLV_MODE)\n                        {\n                            mode = H263_MODE;\n                        }\n                        else\n 
                       {\n                            video->shortVideoHeader = PV_FLV1;\n                        }\n\n                        /* Set max width and height.  In H.263 mode, we use    */\n                        /*  volbuf_size[0] to pass in width and volbuf_size[1] */\n                        /*  to pass in height.                    04/23/2001 */\n                        video->prevVop->temporalRef = 0; /*  11/12/01 */\n                        /* Compute some convenience variables:   04/23/2001 */\n                        video->vol[idx]->quantType = 0;\n                        video->vol[idx]->quantPrecision = 5;\n                        video->vol[idx]->errorResDisable = 1;\n                        video->vol[idx]->dataPartitioning = 0;\n                        video->vol[idx]->useReverseVLC = 0;\n                        video->intra_acdcPredDisable = 1;\n                        video->vol[idx]->scalability = 0;\n                        video->size = (int32)width * height;\n\n                        video->displayWidth = video->width = width;\n                        video->displayHeight = video->height = height;\n#ifdef PV_ANNEX_IJKT_SUPPORT\n                        video->modified_quant = 0;\n                        video->advanced_INTRA = 0;\n                        video->deblocking = 0;\n                        video->slice_structure = 0;\n#endif\n                    }\n\n                }\n            }\n\n        }\n        if (status != PV_FALSE)\n        {\n            if (mode == MPEG4_MODE /* || width !=0 && height !=0 */)\n            {\n                status = PVAllocVideoData(decCtrl, width, height, nLayers);\n                video->initialized = PV_TRUE;\n            }\n        }\n    }\n    else\n    {\n        status = PV_FALSE;\n    }\n\n    if (status == PV_FALSE) PVCleanUpVideoDecoder(decCtrl);\n\n    return status;\n}\n\nBool PVAllocVideoData(VideoDecControls *decCtrl, int width, int height, int nLayers)\n{\n    VideoDecData 
*video = (VideoDecData *) decCtrl->videoDecoderData;\n    Bool status = PV_TRUE;\n    int nTotalMB;\n    int nMBPerRow;\n    int32 size;\n\n    if (video->shortVideoHeader)\n    {\n        video->displayWidth = video->width = width;\n        video->displayHeight = video->height = height;\n\n        video->nMBPerRow =\n            video->nMBinGOB  = video->width / MB_SIZE;\n        video->nMBPerCol =\n            video->nGOBinVop = video->height / MB_SIZE;\n        video->nTotalMB =\n            video->nMBPerRow * video->nMBPerCol;\n    }\n\n    size = (int32)sizeof(PIXEL) * video->width * video->height;\n#ifdef PV_MEMORY_POOL\n    decCtrl->size = size;\n#else\n#ifdef DEC_INTERNAL_MEMORY_OPT\n    video->currVop->yChan = IMEM_currVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/\n    if (video->currVop->yChan == NULL) status = PV_FALSE;\n    video->currVop->uChan = video->currVop->yChan + size;\n    video->currVop->vChan = video->currVop->uChan + (size >> 2);\n\n    video->prevVop->yChan = IMEM_prevVop_yChan; /* Allocate memory for all VOP OKA 3/2/1*/\n    if (video->prevVop->yChan == NULL) status = PV_FALSE;\n    video->prevVop->uChan = video->prevVop->yChan + size;\n    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);\n#else\n    video->currVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/\n    if (video->currVop->yChan == NULL) status = PV_FALSE;\n\n    video->currVop->uChan = video->currVop->yChan + size;\n    video->currVop->vChan = video->currVop->uChan + (size >> 2);\n    video->prevVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/\n    if (video->prevVop->yChan == NULL) status = PV_FALSE;\n\n    video->prevVop->uChan = video->prevVop->yChan + size;\n    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);\n#endif\n    video->memoryUsage += (size * 3);\n#endif   // MEMORY_POOL\n    /* Note that baseVop, enhcVop is only used to hold enhancement */\n    
/*    layer header information.                  05/04/2000  */\n    if (nLayers > 1)\n    {\n        video->prevEnhcVop = (Vop *) oscl_malloc(sizeof(Vop));\n        video->memoryUsage += (sizeof(Vop));\n        if (video->prevEnhcVop == NULL)\n        {\n            status = PV_FALSE;\n        }\n        else\n        {\n            oscl_memset(video->prevEnhcVop, 0, sizeof(Vop));\n#ifndef PV_MEMORY_POOL\n            video->prevEnhcVop->yChan = (PIXEL *) oscl_malloc(size * 3 / 2); /* Allocate memory for all VOP OKA 3/2/1*/\n            if (video->prevEnhcVop->yChan == NULL) status = PV_FALSE;\n            video->prevEnhcVop->uChan = video->prevEnhcVop->yChan + size;\n            video->prevEnhcVop->vChan = video->prevEnhcVop->uChan + (size >> 2);\n            video->memoryUsage += (3 * size / 2);\n#endif\n        }\n    }\n\n    /* Allocating space for slices, AC prediction flag, and */\n    /*    AC/DC prediction storage */\n    nTotalMB = video->nTotalMB;\n    nMBPerRow = video->nMBPerRow;\n\n#ifdef DEC_INTERNAL_MEMORY_OPT\n    video->sliceNo = (uint8 *)(IMEM_sliceNo);\n    if (video->sliceNo == NULL) status = PV_FALSE;\n    video->memoryUsage += nTotalMB;\n    video->acPredFlag = (uint8 *)(IMEM_acPredFlag);\n    if (video->acPredFlag == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB);\n    video->predDC = (typeDCStore *)(IMEM_predDC);\n    if (video->predDC == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB * sizeof(typeDCStore));\n    video->predDCAC_col = (typeDCACStore *)(IMEM_predDCAC_col);\n    if (video->predDCAC_col == NULL) status = PV_FALSE;\n    video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore));\n    video->predDCAC_row = video->predDCAC_col + 1;\n    video->headerInfo.Mode = (uint8 *)(IMEM_headerInfo_Mode);\n    if (video->headerInfo.Mode == NULL) status = PV_FALSE;\n    video->memoryUsage += nTotalMB;\n    video->headerInfo.CBP = (uint8 *)(IMEM_headerInfo_CBP);\n    if (video->headerInfo.CBP == NULL) status 
= PV_FALSE;\n    video->memoryUsage += nTotalMB;\n    video->QPMB = (int *)(IMEM_headerInfo_QPMB);\n    if (video->QPMB == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB * sizeof(int));\n    video->mblock = &IMEM_mblock;\n    if (video->mblock == NULL) status = PV_FALSE;\n    oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); //  Aug 23,2005\n\n    video->memoryUsage += sizeof(MacroBlock);\n    video->motX = (MOT *)(IMEM_motX);\n    if (video->motX == NULL) status = PV_FALSE;\n    video->motY = (MOT *)(IMEM_motY);\n    if (video->motY == NULL) status = PV_FALSE;\n    video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB);\n#else\n    video->sliceNo = (uint8 *) oscl_malloc(nTotalMB);\n    if (video->sliceNo == NULL) status = PV_FALSE;\n    video->memoryUsage += nTotalMB;\n\n    video->acPredFlag = (uint8 *) oscl_malloc(nTotalMB * sizeof(uint8));\n    if (video->acPredFlag == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB);\n\n    video->predDC = (typeDCStore *) oscl_malloc(nTotalMB * sizeof(typeDCStore));\n    if (video->predDC == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB * sizeof(typeDCStore));\n\n    video->predDCAC_col = (typeDCACStore *) oscl_malloc((nMBPerRow + 1) * sizeof(typeDCACStore));\n    if (video->predDCAC_col == NULL) status = PV_FALSE;\n    video->memoryUsage += ((nMBPerRow + 1) * sizeof(typeDCACStore));\n\n    /* element zero will be used for storing vertical (col) AC coefficients */\n    /*  the rest will be used for storing horizontal (row) AC coefficients  */\n    video->predDCAC_row = video->predDCAC_col + 1;        /*  ACDC */\n\n    /* Allocating HeaderInfo structure & Quantizer array */\n    video->headerInfo.Mode = (uint8 *) oscl_malloc(nTotalMB);\n    if (video->headerInfo.Mode == NULL) status = PV_FALSE;\n    video->memoryUsage += nTotalMB;\n    video->headerInfo.CBP = (uint8 *) oscl_malloc(nTotalMB);\n    if (video->headerInfo.CBP == NULL) status = PV_FALSE;\n    
video->memoryUsage += nTotalMB;\n    video->QPMB = (int16 *) oscl_malloc(nTotalMB * sizeof(int16));\n    if (video->QPMB == NULL) status = PV_FALSE;\n    video->memoryUsage += (nTotalMB * sizeof(int));\n\n    /* Allocating macroblock space */\n    video->mblock = (MacroBlock *) oscl_malloc(sizeof(MacroBlock));\n    if (video->mblock == NULL)\n    {\n        status = PV_FALSE;\n    }\n    else\n    {\n        oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); //  Aug 23,2005\n\n        video->memoryUsage += sizeof(MacroBlock);\n    }\n    /* Allocating motion vector space */\n    video->motX = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);\n    if (video->motX == NULL) status = PV_FALSE;\n    video->motY = (MOT *) oscl_malloc(sizeof(MOT) * 4 * nTotalMB);\n    if (video->motY == NULL) status = PV_FALSE;\n    video->memoryUsage += (sizeof(MOT) * 8 * nTotalMB);\n#endif\n\n#ifdef PV_POSTPROC_ON\n    /* Allocating space for post-processing Mode */\n#ifdef DEC_INTERNAL_MEMORY_OPT\n    video->pstprcTypCur = IMEM_pstprcTypCur;\n    video->memoryUsage += (nTotalMB * 6);\n    if (video->pstprcTypCur == NULL)\n    {\n        status = PV_FALSE;\n    }\n    else\n    {\n        oscl_memset(video->pstprcTypCur, 0, 4*nTotalMB + 2*nTotalMB);\n    }\n\n    video->pstprcTypPrv = IMEM_pstprcTypPrv;\n    video->memoryUsage += (nTotalMB * 6);\n    if (video->pstprcTypPrv == NULL)\n    {\n        status = PV_FALSE;\n    }\n    else\n    {\n        oscl_memset(video->pstprcTypPrv, 0, nTotalMB*6);\n    }\n\n#else\n    video->pstprcTypCur = (uint8 *) oscl_malloc(nTotalMB * 6);\n    video->memoryUsage += (nTotalMB * 6);\n    if (video->pstprcTypCur == NULL)\n    {\n        status = PV_FALSE;\n    }\n    else\n    {\n        oscl_memset(video->pstprcTypCur, 0, 4*nTotalMB + 2*nTotalMB);\n    }\n\n    video->pstprcTypPrv = (uint8 *) oscl_malloc(nTotalMB * 6);\n    video->memoryUsage += (nTotalMB * 6);\n    if (video->pstprcTypPrv == NULL)\n    {\n        status = PV_FALSE;\n  
  }\n    else\n    {\n        oscl_memset(video->pstprcTypPrv, 0, nTotalMB*6);\n    }\n\n#endif\n\n#endif\n\n    /* initialize the decoder library */\n    video->prevVop->predictionType = I_VOP;\n    video->prevVop->timeStamp = 0;\n#ifndef PV_MEMORY_POOL\n    oscl_memset(video->prevVop->yChan, 16, sizeof(uint8)*size);     /*  10/31/01 */\n    oscl_memset(video->prevVop->uChan, 128, sizeof(uint8)*size / 2);\n\n    oscl_memset(video->currVop->yChan, 0, sizeof(uint8)*size*3 / 2);\n    if (nLayers > 1)\n    {\n        oscl_memset(video->prevEnhcVop->yChan, 0, sizeof(uint8)*size*3 / 2);\n        video->prevEnhcVop->timeStamp = 0;\n    }\n    video->concealFrame = video->prevVop->yChan;               /*  07/07/2001 */\n    decCtrl->outputFrame = video->prevVop->yChan;              /*  06/19/2002 */\n#endif\n\n    /* always start from base layer */\n    video->currLayer = 0;\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : PVResetVideoDecoder()                                        */\n/*  Date     : 01/14/2002                                                   */\n/*  Purpose  : Reset video timestamps                                       */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVResetVideoDecoder(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    int idx;\n\n    for (idx = 0; idx < decCtrl->nLayers; idx++)\n    {\n        video->vopHeader[idx]->timeStamp = 0;\n    }\n    video->prevVop->timeStamp = 0;\n    if (decCtrl->nLayers > 1)\n        video->prevEnhcVop->timeStamp = 0;\n\n    oscl_memset(video->mblock->block, 0, sizeof(int16)*6*NCOEFF_BLOCK); //  Aug 23,2005\n\n    return PV_TRUE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVCleanUpVideoDecoder()                                      */\n/*  Date     : 04/11/2000, 08/29/2000                                       */\n/*  Purpose  : Cleanup of the MPEG-4 video decoder library.                 */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVCleanUpVideoDecoder(VideoDecControls *decCtrl)\n{\n    int idx;\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n#ifdef DEC_INTERNAL_MEMORY_OPT\n    if (video)\n    {\n#ifdef PV_POSTPROC_ON\n        video->pstprcTypCur = NULL;\n        video->pstprcTypPrv = NULL;\n#endif\n\n        video->acPredFlag       = NULL;\n        video->sliceNo          = NULL;\n        video->motX             = NULL;\n        video->motY             = NULL;\n        video->mblock           = NULL;\n        video->QPMB             = NULL;\n        video->predDC           = NULL;\n        video->predDCAC_row     = NULL;\n        video->predDCAC_col     = NULL;\n        video->headerInfo.Mode  = NULL;\n        video->headerInfo.CBP   = NULL;\n        if (video->numberOfLayers > 1)\n        {\n            if (video->prevEnhcVop)\n            {\n                video->prevEnhcVop->uChan = NULL;\n                video->prevEnhcVop->vChan = NULL;\n                if (video->prevEnhcVop->yChan) oscl_free(video->prevEnhcVop->yChan);\n                oscl_free(video->prevEnhcVop);\n            }\n        }\n        if (video->currVop)\n        {\n            video->currVop->uChan = NULL;\n            video->currVop->vChan = NULL;\n            if (video->currVop->yChan)\n                video->currVop->yChan = NULL;\n            video->currVop = NULL;\n        }\n        if (video->prevVop)\n        {\n            video->prevVop->uChan = NULL;\n            video->prevVop->vChan = NULL;\n            if (video->prevVop->yChan)\n                video->prevVop->yChan = NULL;\n            video->prevVop = NULL;\n        }\n\n        if (video->vol)\n        {\n            for (idx = 0; idx < video->numberOfLayers; idx++)\n            {\n                if (video->vol[idx])\n                {\n      
              BitstreamClose(video->vol[idx]->bitstream);\n                    video->vol[idx]->bitstream = NULL;\n                    video->vol[idx] = NULL;\n                }\n                video->vopHeader[idx] = NULL;\n\n            }\n            video->vol = NULL;\n            video->vopHeader = NULL;\n        }\n\n        video = NULL;\n        decCtrl->videoDecoderData = NULL;\n    }\n\n#else\n\n    if (video)\n    {\n#ifdef PV_POSTPROC_ON\n        if (video->pstprcTypCur) oscl_free(video->pstprcTypCur);\n        if (video->pstprcTypPrv) oscl_free(video->pstprcTypPrv);\n#endif\n        if (video->predDC) oscl_free(video->predDC);\n        video->predDCAC_row = NULL;\n        if (video->predDCAC_col) oscl_free(video->predDCAC_col);\n        if (video->motX) oscl_free(video->motX);\n        if (video->motY) oscl_free(video->motY);\n        if (video->mblock) oscl_free(video->mblock);\n        if (video->QPMB) oscl_free(video->QPMB);\n        if (video->headerInfo.Mode) oscl_free(video->headerInfo.Mode);\n        if (video->headerInfo.CBP) oscl_free(video->headerInfo.CBP);\n        if (video->sliceNo) oscl_free(video->sliceNo);\n        if (video->acPredFlag) oscl_free(video->acPredFlag);\n\n        if (video->numberOfLayers > 1)\n        {\n            if (video->prevEnhcVop)\n            {\n                video->prevEnhcVop->uChan = NULL;\n                video->prevEnhcVop->vChan = NULL;\n                if (video->prevEnhcVop->yChan) oscl_free(video->prevEnhcVop->yChan);\n                oscl_free(video->prevEnhcVop);\n            }\n        }\n        if (video->currVop)\n        {\n\n#ifndef PV_MEMORY_POOL\n            video->currVop->uChan = NULL;\n            video->currVop->vChan = NULL;\n            if (video->currVop->yChan)\n                oscl_free(video->currVop->yChan);\n#endif\n            oscl_free(video->currVop);\n        }\n        if (video->prevVop)\n        {\n#ifndef PV_MEMORY_POOL\n            video->prevVop->uChan = NULL;\n       
     video->prevVop->vChan = NULL;\n            if (video->prevVop->yChan)\n                oscl_free(video->prevVop->yChan);\n#endif\n            oscl_free(video->prevVop);\n        }\n\n        if (video->vol)\n        {\n            for (idx = 0; idx < video->numberOfLayers; idx++)\n            {\n                if (video->vol[idx])\n                {\n                    if (video->vol[idx]->bitstream)\n                    {\n                        BitstreamClose(video->vol[idx]->bitstream);\n                        oscl_free(video->vol[idx]->bitstream);\n                    }\n                    oscl_free(video->vol[idx]);\n                }\n\n            }\n            oscl_free(video->vol);\n        }\n\n        for (idx = 0; idx < video->numberOfLayers; idx++)\n        {\n            if (video->vopHeader[idx]) oscl_free(video->vopHeader[idx]);\n        }\n\n        if (video->vopHeader) oscl_free(video->vopHeader);\n\n        oscl_free(video);\n        decCtrl->videoDecoderData = NULL;\n    }\n#endif\n    return PV_TRUE;\n}\n/* ======================================================================== */\n/*  Function : PVGetVideoDimensions()                                       */\n/*  Date     : 040505                                                       */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : the display_width and display_height of                      */\n/*          the frame in the current layer.                                 */\n/*  Note     : This is not a macro or inline function because we do         */\n/*              not want to expose our internal data structure.             
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF void PVGetVideoDimensions(VideoDecControls *decCtrl, int32 *display_width, int32 *display_height)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    *display_width = video->displayWidth;\n    *display_height = video->displayHeight;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetVideoTimeStamp()                                        */\n/*  Date     : 04/27/2000, 08/29/2000                                       */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : current time stamp in millisecond.                           */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nuint32 PVGetVideoTimeStamp(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    return video->currTimestamp;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVSetPostProcType()                                          */\n/*  Date     : 07/07/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : Set post-processing filter type.                             */\n/*  Note     :                                                              */\n/*  Modified : . 08/29/2000 changes the name for consistency.               
*/\n/* ======================================================================== */\nOSCL_EXPORT_REF void PVSetPostProcType(VideoDecControls *decCtrl, int mode)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    video->postFilterType = mode;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVGetDecBitrate()                                            */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns the average bits per second.           */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint PVGetDecBitrate(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    int     idx;\n    int32   sum = 0;\n\n    for (idx = 0; idx < BITRATE_AVERAGE_WINDOW; idx++)\n    {\n        sum += video->nBitsPerVop[idx];\n    }\n    sum = (sum * video->frameRate) / (10 * BITRATE_AVERAGE_WINDOW);\n    return (int) sum;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVGetDecFramerate()                                          */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns the average frame per 10 second.       
*/\n/*  Note     : The fps can be calculated by PVGetDecFramerate()/10          */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint PVGetDecFramerate(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n\n    return video->frameRate;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetOutputFrame()                                           */\n/*  Date     : 05/07/2001                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns the pointer to the output frame        */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nuint8 *PVGetDecOutputFrame(VideoDecControls *decCtrl)\n{\n    return decCtrl->outputFrame;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetLayerID()                                               */\n/*  Date     : 07/09/2001                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns decoded frame layer id (BASE/ENHANCE)  */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint PVGetLayerID(VideoDecControls *decCtrl)\n{\n    
VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    return video->currLayer;\n}\n/* ======================================================================== */\n/*  Function : PVGetDecMemoryUsage()                                        */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns the amount of memory used.             */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint32 PVGetDecMemoryUsage(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    return video->memoryUsage;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVGetDecBitstreamMode()                                      */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function returns the decoding mode of the baselayer     */\n/*              bitstream.                                                  
*/\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nOSCL_EXPORT_REF MP4DecodingMode PVGetDecBitstreamMode(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    if (video->shortVideoHeader)\n    {\n        if (video->shortVideoHeader & PV_FLV1)\n        {\n            return FLV_MODE;\n        }\n        else\n        {\n            return H263_MODE;\n        }\n    }\n    else\n    {\n        return MPEG4_MODE;\n    }\n}\n\n\n/* ======================================================================== */\n/*  Function : PVExtractVolHeader()                                         */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : Extract vol header of the bitstream from buffer[].           
*/\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVExtractVolHeader(uint8 *video_buffer, uint8 *vol_header, int32 *vol_header_size)\n{\n    int idx = -1;\n    uint8 start_code_prefix[] = { 0x00, 0x00, 0x01 };\n    uint8 h263_prefix[] = { 0x00, 0x00, 0x80 };\n\n    if (oscl_memcmp(h263_prefix, video_buffer, 3) == 0) /* we have short header stream */\n    {\n        oscl_memcpy(vol_header, video_buffer, 32);\n        *vol_header_size = 32;\n        return TRUE;\n    }\n    else\n    {\n        if (oscl_memcmp(start_code_prefix, video_buffer, 3) ||\n                (video_buffer[3] != 0xb0 && video_buffer[3] >= 0x20)) return FALSE;\n\n        do\n        {\n            idx++;\n            while (oscl_memcmp(start_code_prefix, video_buffer + idx, 3))\n            {\n                idx++;\n                if (idx + 3 >= *vol_header_size) goto quit;\n            }\n        }\n        while (video_buffer[idx+3] != 0xb3 && video_buffer[idx+3] != 0xb6);\n\n        oscl_memcpy(vol_header, video_buffer, idx);\n        *vol_header_size = idx;\n        return TRUE;\n    }\n\nquit:\n    oscl_memcpy(vol_header, video_buffer, *vol_header_size);\n    return FALSE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVLocateFrameHeader()                                        */\n/*  Date     : 04/8/2005                                                    */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : Return the offset to the first SC in the buffer              */\n/*  Note     :                                                              */\n/*  Modified :                                                   
           */\n/* ======================================================================== */\nint32 PVLocateFrameHeader(uint8 *ptr, int32 size)\n{\n    int count = 0;\n    int32 i = size;\n\n    if (size < 1)\n    {\n        return 0;\n    }\n    while (i--)\n    {\n        if ((count > 1) && (*ptr == 0x01))\n        {\n            i += 2;\n            break;\n        }\n\n        if (*ptr++)\n            count = 0;\n        else\n            count++;\n    }\n    return (size - (i + 1));\n}\n\n\n/* ======================================================================== */\n/*  Function : PVLocateH263FrameHeader()                                    */\n/*  Date     : 04/8/2005                                                    */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : Return the offset to the first SC in the buffer              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint32 PVLocateH263FrameHeader(uint8 *ptr, int32 size)\n{\n    int count = 0;\n    int32 i = size;\n\n    if (size < 1)\n    {\n        return 0;\n    }\n\n    while (i--)\n    {\n        if ((count > 1) && ((*ptr & 0xFC) == 0x80))\n        {\n            i += 2;\n            break;\n        }\n\n        if (*ptr++)\n            count = 0;\n        else\n            count++;\n    }\n    return (size - (i + 1));\n}\n\n\n\n\n/* ======================================================================== */\n/*  Function : PVDecodeVideoFrame()                                         */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Decode one video frame and return a YUV-12 image.            
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified : 04/17/2001 removed PV_EOS, PV_END_OF_BUFFER              */\n/*           : 08/22/2002 break up into 2 functions PVDecodeVopHeader and */\n/*                          PVDecodeVopBody                                 */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVDecodeVideoFrame(VideoDecControls *decCtrl, uint8 *buffer[],\n                                        uint32 timestamp[], int32 buffer_size[], uint use_ext_timestamp[], uint8 *currYUV)\n{\n    PV_STATUS status = PV_FAIL;\n    VopHeaderInfo header_info;\n\n    status = (PV_STATUS)PVDecodeVopHeader(decCtrl, buffer, timestamp, buffer_size, &header_info, use_ext_timestamp, currYUV);\n    if (status != PV_TRUE)\n        return PV_FALSE;\n\n    if (PVDecodeVopBody(decCtrl, buffer_size) != PV_TRUE)\n    {\n        return PV_FALSE;\n    }\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVDecodeVopHeader()                                          */\n/*  Date     : 08/22/2002                                                   */\n/*  Purpose  : Determine target layer and decode vop header, modified from  */\n/*              original PVDecodeVideoFrame.                                
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVDecodeVopHeader(VideoDecControls *decCtrl, uint8 *buffer[],\n                       uint32 timestamp[], int32 buffer_size[], VopHeaderInfo *header_info, uint use_ext_timestamp [], uint8 *currYUV)\n{\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    Vol *currVol;\n    Vop *currVop = video->currVop;\n    Vop **vopHeader = video->vopHeader;\n    BitstreamDecVideo *stream;\n\n    int target_layer;\n\n#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY\n    PV_STATUS status = PV_FAIL;\n    int idx;\n    int32 display_time;\n\n    /* decide which frame to decode next */\n    if (decCtrl->nLayers > 1)\n    {\n        display_time = target_layer = -1;\n        for (idx = 0; idx < decCtrl->nLayers; idx++)\n        {\n            /* do we have data for this layer? */\n            if (buffer_size[idx] <= 0)\n            {\n                timestamp[idx] = -1;\n                continue;\n            }\n\n            /* did the application provide a timestamp for this vop? */\n            if (timestamp[idx] < 0)\n            {\n                if (vopHeader[idx]->timeStamp < 0)\n                {\n                    /* decode the timestamp in the bitstream */\n                    video->currLayer = idx;\n                    stream = video->vol[idx]->bitstream;\n                    BitstreamReset(stream, buffer[idx], buffer_size[idx]);\n\n                    while ((status = DecodeVOPHeader(video, vopHeader[idx], FALSE)) != PV_SUCCESS)\n                    {\n                        /* Try to find a VOP header in the buffer.   08/30/2000. 
*/\n                        if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)\n                        {\n                            /* if we don't have data for enhancement layer, */\n                            /*    don't just stop.   09/07/2000.          */\n                            buffer_size[idx] = 0;\n                            break;\n                        }\n                    }\n                    if (status == PV_SUCCESS)\n                    {\n                        vopHeader[idx]->timeStamp =\n                            timestamp[idx] = CalcVopDisplayTime(video->vol[idx], vopHeader[idx], video->shortVideoHeader);\n                        if (idx == 0) vopHeader[idx]->refSelectCode = 1;\n                    }\n                }\n                else\n                {\n                    /* We've decoded this vop header in the previous run already. */\n                    timestamp[idx] = vopHeader[idx]->timeStamp;\n                }\n            }\n\n            /* Use timestamps to select the next VOP to be decoded */\n            if (timestamp[idx] >= 0 && (display_time < 0 || display_time > timestamp[idx]))\n            {\n                display_time = timestamp[idx];\n                target_layer = idx;\n            }\n            else if (display_time == timestamp[idx])\n            {\n                /* we have to handle either SNR or spatial scalability here. */\n            }\n        }\n        if (target_layer < 0) return PV_FALSE;\n\n        /* set up for decoding the target layer */\n        video->currLayer = target_layer;\n        currVol = video->vol[target_layer];\n        video->bitstream = stream = currVol->bitstream;\n\n        /* We need to decode the vop header if external timestamp   */\n        /*    is provided.    
10/04/2000                            */\n        if (vopHeader[target_layer]->timeStamp < 0)\n        {\n            stream = video->vol[target_layer]->bitstream;\n            BitstreamReset(stream, buffer[target_layer], buffer_size[target_layer]);\n\n            while (DecodeVOPHeader(video, vopHeader[target_layer], TRUE) != PV_SUCCESS)\n            {\n                /* Try to find a VOP header in the buffer.   08/30/2000. */\n                if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)\n                {\n                    /* if we don't have data for enhancement layer, */\n                    /*    don't just stop.   09/07/2000.          */\n                    buffer_size[target_layer] = 0;\n                    break;\n                }\n            }\n            video->vol[target_layer]->timeInc_offset = vopHeader[target_layer]->timeInc;\n            video->vol[target_layer]->moduloTimeBase = timestamp[target_layer];\n            vopHeader[target_layer]->timeStamp = timestamp[target_layer];\n            if (target_layer == 0) vopHeader[target_layer]->refSelectCode = 1;\n        }\n    }\n    else /* base layer only decoding */\n    {\n#endif\n        video->currLayer = target_layer = 0;\n        currVol = video->vol[0];\n        video->bitstream = stream = currVol->bitstream;\n        if (buffer_size[0] <= 0) return PV_FALSE;\n        BitstreamReset(stream, buffer[0], buffer_size[0]);\n\n        if (video->shortVideoHeader)\n        {\n            while (DecodeShortHeader(video, vopHeader[0]) != PV_SUCCESS)\n            {\n                if (PVSearchNextH263Frame(stream) != PV_SUCCESS)\n                {\n                    /* There is no vop header in the buffer,    */\n                    /*   clean bitstream buffer.     
2/5/2001   */\n                    buffer_size[0] = 0;\n                    if (video->initialized == PV_FALSE)\n                    {\n                        video->displayWidth = video->width = 0;\n                        video->displayHeight = video->height = 0;\n                    }\n                    return PV_FALSE;\n                }\n            }\n            if (video->initialized == PV_FALSE)\n            {\n                if (PVAllocVideoData(decCtrl, video->width, video->height, 1) == PV_FALSE)\n                {\n                    video->displayWidth = video->width = 0;\n                    video->displayHeight = video->height = 0;\n                    return PV_FALSE;\n                }\n                video->initialized = PV_TRUE;\n            }\n\n            if (use_ext_timestamp[0])\n            {\n                /* MTB for H263 is absolute TR */\n                /* following line is equivalent to  round((timestamp[0]*30)/1001);   11/13/2001 */\n                video->vol[0]->moduloTimeBase = 30 * ((timestamp[0] + 17) / 1001) + (30 * ((timestamp[0] + 17) % 1001) / 1001);\n                vopHeader[0]->timeStamp = timestamp[0];\n            }\n            else\n                vopHeader[0]->timeStamp = CalcVopDisplayTime(currVol, vopHeader[0], video->shortVideoHeader);\n        }\n        else\n        {\n            while (DecodeVOPHeader(video, vopHeader[0], FALSE) != PV_SUCCESS)\n            {\n                /* Try to find a VOP header in the buffer.   08/30/2000. */\n                if (PVSearchNextM4VFrame(stream) != PV_SUCCESS)\n                {\n                    /* There is no vop header in the buffer,    */\n                    /*   clean bitstream buffer.     
2/5/2001   */\n                    buffer_size[0] = 0;\n                    return PV_FALSE;\n                }\n            }\n\n            if (use_ext_timestamp[0])\n            {\n                video->vol[0]->timeInc_offset = vopHeader[0]->timeInc;\n                video->vol[0]->moduloTimeBase = timestamp[0];  /*  11/12/2001 */\n                vopHeader[0]->timeStamp = timestamp[0];\n            }\n            else\n            {\n                vopHeader[0]->timeStamp = CalcVopDisplayTime(currVol, vopHeader[0], video->shortVideoHeader);\n            }\n        }\n\n        /* set up some base-layer only parameters */\n        vopHeader[0]->refSelectCode = 1;\n#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY\n    }\n#endif\n    timestamp[target_layer] = video->currTimestamp = vopHeader[target_layer]->timeStamp;\n#ifdef PV_MEMORY_POOL\n    vopHeader[target_layer]->yChan = (PIXEL *)currYUV;\n    vopHeader[target_layer]->uChan = (PIXEL *)currYUV + decCtrl->size;\n    vopHeader[target_layer]->vChan = (PIXEL *)(vopHeader[target_layer]->uChan) + (decCtrl->size >> 2);\n#else\n    vopHeader[target_layer]->yChan = currVop->yChan;\n    vopHeader[target_layer]->uChan = currVop->uChan;\n    vopHeader[target_layer]->vChan = currVop->vChan;\n#endif\n    oscl_memcpy(currVop, vopHeader[target_layer], sizeof(Vop));\n\n#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY\n    vopHeader[target_layer]->timeStamp = -1;\n#endif\n    /* put header info into the structure */\n    header_info->currLayer = target_layer;\n    header_info->timestamp = video->currTimestamp;\n    header_info->frameType = (MP4FrameType)currVop->predictionType;\n    header_info->refSelCode = vopHeader[target_layer]->refSelectCode;\n    header_info->quantizer = currVop->quantizer;\n    /***************************************/\n\n    return PV_TRUE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVDecodeVopBody()                                            */\n/*  Date     : 
08/22/2002                                                   */\n/*  Purpose  : Decode vop body after the header is decoded, modified from   */\n/*              original PVDecodeVideoFrame.                                */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVDecodeVopBody(VideoDecControls *decCtrl, int32 buffer_size[])\n{\n    PV_STATUS status = PV_FAIL;\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    int target_layer = video->currLayer;\n    Vol *currVol = video->vol[target_layer];\n    Vop *currVop = video->currVop;\n    Vop *prevVop = video->prevVop;\n    Vop *tempVopPtr;\n    int bytes_consumed = 0; /* Record how many bits we used in the buffer.   04/24/2001 */\n\n    int idx;\n\n    if (currVop->vopCoded == 0)                  /*  07/03/2001 */\n    {\n        PV_BitstreamByteAlign(currVol->bitstream);\n        /* We should always clear up bitstream buffer.   10/10/2000 */\n        bytes_consumed = (getPointer(currVol->bitstream) + 7) >> 3;\n\n        if (bytes_consumed > currVol->bitstream->data_end_pos)\n        {\n            bytes_consumed = currVol->bitstream->data_end_pos;\n        }\n\n        if (bytes_consumed < buffer_size[target_layer])\n        {\n            /* If we only consume part of the bits in the buffer, take those */\n            /*  out.     
04/24/2001 */\n            /*          oscl_memcpy(buffer[target_layer], buffer[target_layer]+bytes_consumed,\n                            (buffer_size[target_layer]-=bytes_consumed)); */\n            buffer_size[target_layer] -= bytes_consumed;\n        }\n        else\n        {\n            buffer_size[target_layer] = 0;\n        }\n#ifdef PV_MEMORY_POOL\n\n        if (target_layer)\n        {\n            if (video->prevEnhcVop->timeStamp > video->prevVop->timeStamp)\n            {\n                video->prevVop = video->prevEnhcVop;\n            }\n        }\n\n        oscl_memcpy(currVop->yChan, video->prevVop->yChan, (decCtrl->size*3) / 2);\n\n        video->prevVop = prevVop;\n\n        video->concealFrame = currVop->yChan;       /*  07/07/2001 */\n\n        video->vop_coding_type = currVop->predictionType; /*  07/09/01 */\n\n        decCtrl->outputFrame = currVop->yChan;\n\n        /* Swap VOP pointers.  No enhc. frame oscl_memcpy() anymore!   04/24/2001 */\n        if (target_layer)\n        {\n            tempVopPtr = video->prevEnhcVop;\n            video->prevEnhcVop = video->currVop;\n            video->currVop = tempVopPtr;\n        }\n        else\n        {\n            tempVopPtr = video->prevVop;\n            video->prevVop = video->currVop;\n            video->currVop = tempVopPtr;\n        }\n#else\n        if (target_layer)       /* this is necessary to avoid flashback problems   06/21/2002*/\n        {\n            video->prevEnhcVop->timeStamp = currVop->timeStamp;\n        }\n        else\n        {\n            video->prevVop->timeStamp = currVop->timeStamp;\n        }\n#endif\n        video->vop_coding_type = currVop->predictionType; /*  07/09/01 */\n        /* the following is necessary to avoid displaying an notCoded I-VOP at the beginning of a session\n        or after random positioning  07/03/02*/\n        if (currVop->predictionType == I_VOP)\n        {\n            video->vop_coding_type = P_VOP;\n        }\n\n\n        return 
PV_TRUE;\n    }\n    /* ======================================================= */\n    /*  Decode vop body (if there is no error in the header!)  */\n    /* ======================================================= */\n\n    /* first, we need to select a reference frame */\n    if (decCtrl->nLayers > 1)\n    {\n        if (currVop->predictionType == I_VOP)\n        {\n            /* do nothing here */\n        }\n        else if (currVop->predictionType == P_VOP)\n        {\n            switch (currVop->refSelectCode)\n            {\n                case 0 : /* most recently decoded enhancement vop */\n                    /* Setup video->prevVop before we call PV_DecodeVop().   04/24/2001 */\n                    if (video->prevEnhcVop->timeStamp >= video->prevVop->timeStamp)\n                        video->prevVop = video->prevEnhcVop;\n                    break;\n\n                case 1 : /* most recently displayed base-layer vop */\n                    if (target_layer)\n                    {\n                        if (video->prevEnhcVop->timeStamp > video->prevVop->timeStamp)\n                            video->prevVop = video->prevEnhcVop;\n                    }\n                    break;\n\n                case 2 : /* next base-layer vop in display order */\n                    break;\n\n                case 3 : /* temporally coincident base-layer vop (no MV's) */\n                    break;\n            }\n        }\n        else /* we have a B-Vop */\n        {\n            mp4dec_log(\"DecodeVideoFrame(): B-VOP not supported.\\n\");\n        }\n    }\n\n    /* This is for the calculation of the frame rate and bitrate. */\n    idx = ++video->frame_idx % BITRATE_AVERAGE_WINDOW;\n\n    /* Calculate bitrate for this layer.   08/23/2000 */\n    status = PV_DecodeVop(video);\n    video->nBitsPerVop[idx] = getPointer(currVol->bitstream);\n    video->prevTimestamp[idx] = currVop->timeStamp;\n\n    /* restore video->prevVop after PV_DecodeVop().   
04/24/2001 */\n//  if (currVop->refSelectCode == 0) video->prevVop = prevVop;\n    video->prevVop = prevVop;\n\n    /* Estimate the frame rate.   08/23/2000 */\n    video->duration = video->prevTimestamp[idx];\n    video->duration -= video->prevTimestamp[(++idx)%BITRATE_AVERAGE_WINDOW];\n    if (video->duration > 0)\n    { /* Only update framerate when the timestamp is right */\n        video->frameRate = (int)(FRAMERATE_SCALE) / video->duration;\n    }\n\n    /* We should always clear up bitstream buffer.   10/10/2000 */\n    bytes_consumed = (getPointer(currVol->bitstream) + 7) >> 3; /*  11/4/03 */\n\n    if (bytes_consumed > currVol->bitstream->data_end_pos)\n    {\n        bytes_consumed = currVol->bitstream->data_end_pos;\n    }\n\n    if (bytes_consumed < buffer_size[target_layer])\n    {\n        /* If we only consume part of the bits in the buffer, take those */\n        /*  out.     04/24/2001 */\n        /*      oscl_memcpy(buffer[target_layer], buffer[target_layer]+bytes_consumed,\n                    (buffer_size[target_layer]-=bytes_consumed)); */\n        buffer_size[target_layer] -= bytes_consumed;\n    }\n    else\n    {\n        buffer_size[target_layer] = 0;\n    }\n    switch (status)\n    {\n        case PV_FAIL :\n            return PV_FALSE;        /* this will take care of concealment if we lose whole frame  */\n\n        case PV_END_OF_VOP :\n            /* we may want to differenciate PV_END_OF_VOP and PV_SUCCESS */\n            /*    in the future.     05/10/2000                      */\n\n        case PV_SUCCESS :\n            /* Nohting is wrong :). */\n\n\n            video->concealFrame = video->currVop->yChan;       /*  07/07/2001 */\n\n            video->vop_coding_type = video->currVop->predictionType; /*  07/09/01 */\n\n            decCtrl->outputFrame = video->currVop->yChan;\n\n            /* Swap VOP pointers.  No enhc. frame oscl_memcpy() anymore!   
04/24/2001 */\n            if (target_layer)\n            {\n                tempVopPtr = video->prevEnhcVop;\n                video->prevEnhcVop = video->currVop;\n                video->currVop = tempVopPtr;\n            }\n            else\n            {\n                tempVopPtr = video->prevVop;\n                video->prevVop = video->currVop;\n                video->currVop = tempVopPtr;\n            }\n            break;\n\n        default :\n            /* This will never happen */\n            break;\n    }\n\n    return PV_TRUE;\n}\n\n#ifdef PV_MEMORY_POOL\nOSCL_EXPORT_REF void PVSetReferenceYUV(VideoDecControls *decCtrl, uint8 *YUV)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    video->prevVop->yChan = (PIXEL *)YUV;\n    video->prevVop->uChan = (PIXEL *)YUV + decCtrl->size;\n    video->prevVop->vChan = (PIXEL *)video->prevVop->uChan + (decCtrl->size >> 2);\n    oscl_memset(video->prevVop->yChan, 16, sizeof(uint8)*decCtrl->size);     /*  10/31/01 */\n    oscl_memset(video->prevVop->uChan, 128, sizeof(uint8)*decCtrl->size / 2);\n    video->concealFrame = video->prevVop->yChan;               /*  07/07/2001 */\n    decCtrl->outputFrame = video->prevVop->yChan;              /*  06/19/2002 */\n}\n#endif\n\n\n/* ======================================================================== */\n/*  Function : VideoDecoderErrorDetected()                                  */\n/*  Date     : 06/20/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : This function will be called everytime an error int the      */\n/*              bitstream is detected.                                      
*/\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nuint VideoDecoderErrorDetected(VideoDecData * video)\n{\n    OSCL_UNUSED_ARG(video);\n    /* This is only used for trapping bitstream error for debuging */\n    return 0;\n}\n\n#ifdef ENABLE_LOG\n#include <stdio.h>\n#include <stdarg.h>\n/* ======================================================================== */\n/*  Function : m4vdec_dprintf()                                             */\n/*  Date     : 08/15/2000                                                   */\n/*  Purpose  : This is a function that logs messages in the mpeg4 video     */\n/*             decoder.  We can call the standard PacketVideo PVMessage     */\n/*             from inside this function if necessary.                      */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     : To turn on the logging, LOG_MP4DEC_MESSAGE must be defined   */\n/*              when compiling this file (only this file).                  
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid m4vdec_dprintf(char *format, ...)\n{\n    FILE *log_fp;\n    va_list args;\n    va_start(args, format);\n\n    /* open the log file */\n    log_fp = fopen(\"\\\\mp4dec_log.txt\", \"a+\");\n    if (log_fp == NULL) return;\n    /* output the message */\n    vfprintf(log_fp, format, args);\n    fclose(log_fp);\n\n    va_end(args);\n}\n#endif\n\n\n/* ======================================================================== */\n/*  Function : IsIntraFrame()                                               */\n/*  Date     : 05/29/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : The most recently decoded frame is an Intra frame.           */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool IsIntraFrame(VideoDecControls *decCtrl)\n{\n    VideoDecData *video = (VideoDecData *)decCtrl->videoDecoderData;\n    return (video->vop_coding_type == I_VOP);\n}\n\n/* ======================================================================== */\n/*  Function : PVDecPostProcess()                                           */\n/*  Date     : 01/09/2002                                                   */\n/*  Purpose  : PostProcess one video frame and return a YUV-12 image.       
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid PVDecPostProcess(VideoDecControls *decCtrl, uint8 *outputYUV)\n{\n    uint8 *outputBuffer;\n#ifdef PV_POSTPROC_ON\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    int32 tmpvar;\n    if (outputYUV)\n    {\n        outputBuffer = outputYUV;\n    }\n    else\n    {\n        if (video->postFilterType)\n        {\n            outputBuffer = video->currVop->yChan;\n        }\n        else\n        {\n            outputBuffer = decCtrl->outputFrame;\n        }\n    }\n\n    if (video->postFilterType)\n    {\n        /* Post-processing,  */\n        PostFilter(video, video->postFilterType, outputBuffer);\n    }\n    else\n    {\n        if (outputYUV)\n        {\n            /* Copy decoded frame to the output buffer. */\n            tmpvar = (int32)video->width * video->height;\n            oscl_memcpy(outputBuffer, decCtrl->outputFrame, tmpvar*3 / 2);           /*  3/3/01 */\n        }\n    }\n#else\n    OSCL_UNUSED_ARG(outputYUV);\n    outputBuffer = decCtrl->outputFrame;\n#endif\n    decCtrl->outputFrame = outputBuffer;\n    return;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV,  */\n/*                              int32 timestamp)                            */\n/*  Date     : 07/22/2003                                                   */\n/*  Purpose  : Get YUV reference frame from external source.                
*/\n/*  In/out   : YUV 4-2-0 frame containing new reference frame in the same   */\n/*   : dimension as original, i.e., doesn't have to be multiple of 16 !!!.  */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVDecSetReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp)\n{\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    Vop *prevVop = video->prevVop;\n    int width = video->width;\n    uint8 *dstPtr, *orgPtr, *dstPtr2, *orgPtr2;\n    int32 size = (int32)width * video->height;\n\n\n    /* set new parameters */\n    prevVop->timeStamp = timestamp;\n    prevVop->predictionType = I_VOP;\n\n    dstPtr = prevVop->yChan;\n    orgPtr = refYUV;\n    oscl_memcpy(dstPtr, orgPtr, size);\n    dstPtr = prevVop->uChan;\n    dstPtr2 = prevVop->vChan;\n    orgPtr = refYUV + size;\n    orgPtr2 = orgPtr + (size >> 2);\n    oscl_memcpy(dstPtr, orgPtr, (size >> 2));\n    oscl_memcpy(dstPtr2, orgPtr2, (size >> 2));\n\n    video->concealFrame = video->prevVop->yChan;\n    video->vop_coding_type = I_VOP;\n    decCtrl->outputFrame = video->prevVop->yChan;\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV,   */\n/*                              int32 timestamp)                            */\n/*  Date     : 07/23/2003                                                   */\n/*  Purpose  : Get YUV enhance reference frame from external source.        */\n/*  In/out   : YUV 4-2-0 frame containing new reference frame in the same   */\n/*   : dimension as original, i.e., doesn't have to be multiple of 16 !!!.  
*/\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nBool PVDecSetEnhReference(VideoDecControls *decCtrl, uint8 *refYUV, uint32 timestamp)\n{\n    VideoDecData *video = (VideoDecData *) decCtrl->videoDecoderData;\n    Vop *prevEnhcVop = video->prevEnhcVop;\n    uint8 *dstPtr, *orgPtr, *dstPtr2, *orgPtr2;\n    int32 size = (int32) video->width * video->height;\n\n    if (video->numberOfLayers <= 1)\n        return PV_FALSE;\n\n\n    /* set new parameters */\n    prevEnhcVop->timeStamp = timestamp;\n    prevEnhcVop->predictionType = I_VOP;\n\n    dstPtr = prevEnhcVop->yChan;\n    orgPtr = refYUV;\n    oscl_memcpy(dstPtr, orgPtr, size);\n    dstPtr = prevEnhcVop->uChan;\n    dstPtr2 = prevEnhcVop->vChan;\n    orgPtr = refYUV + size;\n    orgPtr2 = orgPtr + (size >> 2);\n    oscl_memcpy(dstPtr, orgPtr, (size >> 2));\n    oscl_memcpy(dstPtr2, orgPtr2, (size >> 2));\n    video->concealFrame = video->prevEnhcVop->yChan;\n    video->vop_coding_type = I_VOP;\n    decCtrl->outputFrame = video->prevEnhcVop->yChan;\n\n    return PV_TRUE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVGetVolInfo()                                               */\n/*  Date     : 08/06/2003                                                   */\n/*  Purpose  : Get the vol info(only base-layer).                           
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Note     :                                                              */\n/*  Modified : 06/24/2004                                                   */\n/* ======================================================================== */\nBool PVGetVolInfo(VideoDecControls *decCtrl, VolInfo *pVolInfo)\n{\n    Vol *currVol;\n\n    if (pVolInfo == NULL || decCtrl == NULL || decCtrl->videoDecoderData == NULL ||\n            ((VideoDecData *)decCtrl->videoDecoderData)->vol[0] == NULL) return PV_FALSE;\n\n    currVol = ((VideoDecData *)(decCtrl->videoDecoderData))->vol[0];\n\n    // get the VOL info\n    pVolInfo->shortVideoHeader = (int32)((VideoDecData *)(decCtrl->videoDecoderData))->shortVideoHeader;\n    pVolInfo->dataPartitioning = (int32)currVol->dataPartitioning;\n    pVolInfo->errorResDisable  = (int32)currVol->errorResDisable;\n    pVolInfo->useReverseVLC    = (int32)currVol->useReverseVLC;\n    pVolInfo->scalability      = (int32)currVol->scalability;\n    pVolInfo->nbitsTimeIncRes  = (int32)currVol->nbitsTimeIncRes;\n    pVolInfo->profile_level_id = (int32)currVol->profile_level_id;\n\n    return PV_TRUE;\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/pvm4vdecoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n//////////////////////////////////////////////////////////////////////////////////\n//                                                                              //\n//  File: pvm4vdecoder.cpp                                                  //\n//                                                                              //\n//////////////////////////////////////////////////////////////////////////////////\n\n#include \"oscl_mem.h\"\n#include \"mp4dec_api.h\"\n#include \"pvm4vdecoder.h\"\n\n\n#define OSCL_DISABLE_WARNING_FORCING_INT_TO_BOOL\n#include \"osclconfig_compiler_warnings.h\"\n\n\n/////////////////////////////////////////////////////////////////////////////\nPVM4VDecoder::PVM4VDecoder() : iVideoCtrls(NULL)\n{\n}\n\n\nPVM4VDecoder* PVM4VDecoder::New(void)\n{\n    PVM4VDecoder* self = new PVM4VDecoder;\n\n    if (self)\n    {\n        if (!self->Construct())\n        {\n            OSCL_DELETE(self);\n            self = NULL;\n        }\n    }\n\n    return self;\n}\n\nbool PVM4VDecoder::Construct()\n{\n    iVideoCtrls = (VideoDecControls *) new VideoDecControls;\n    if (iVideoCtrls)\n    {\n        oscl_memset(iVideoCtrls, 0, sizeof(VideoDecControls));\n        return true;\n    }\n  
  else\n    {\n        return false;\n    }\n}\n\n/////////////////////////////////////////////////////////////////////////////\nPVM4VDecoder::~PVM4VDecoder()\n{\n    if (iVideoCtrls)\n    {\n        OSCL_DELETE((VideoDecControls *)iVideoCtrls);\n        iVideoCtrls = NULL;\n    }\n}\n\n/////////////////////////////////////////////////////////////////////////////\nbool PVM4VDecoder::InitVideoDecoder(uint8 *volbuf[],\n                                    int32 *volbuf_size,\n                                    int32 nLayers,\n                                    int32* iWidth,\n                                    int32* iHeight,\n                                    int *mode)\n{\n    if (PVInitVideoDecoder((VideoDecControls *)iVideoCtrls, (uint8 **) volbuf, (int32*)volbuf_size, (int32)nLayers, *iWidth, *iHeight, (MP4DecodingMode) *mode))\n    {\n        GetVideoDimensions(iWidth, iHeight);\n        *mode = (int)PVGetDecBitstreamMode((VideoDecControls *)iVideoCtrls);\n        return true;\n    }\n    else\n    {\n        return false;\n    }\n\n}\n\n/////////////////////////////////////////////////////////////////////////////\nbool PVM4VDecoder::GetVolInfo(VolInfo* pVolInfo)\n{\n    if (!iVideoCtrls || !pVolInfo) return false;\n    if (PVGetVolInfo((VideoDecControls *)iVideoCtrls, pVolInfo))\n    {\n        return true;\n    }\n    else\n    {\n        return false;\n    }\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::CleanUpVideoDecoder(void)\n{\n    PVCleanUpVideoDecoder((VideoDecControls *)iVideoCtrls);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nbool PVM4VDecoder::DecodeVideoFrame(uint8 *bitstream[], uint32 *timestamp, int32 *buffer_size, uint *use_ext_timestamp, uint8 *currYUV)\n{\n    return PVDecodeVideoFrame((VideoDecControls *)iVideoCtrls, (uint8 **) bitstream, (uint32*)timestamp, (int32*)buffer_size, (uint *) use_ext_timestamp, (uint8 *) currYUV) ? 
true : false;\n}\n\n//////////////////////////////////////////////////////////////////////////////\nvoid  PVM4VDecoder::SetReferenceYUV(uint8 *YUV)\n{\n    PVSetReferenceYUV((VideoDecControls *)iVideoCtrls, YUV);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::GetVideoDimensions(int32 *display_width, int32 *display_height)\n{\n    PVGetVideoDimensions((VideoDecControls *)iVideoCtrls, display_width, display_height);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::SetPostProcType(int32 mode)\n{\n    PVSetPostProcType((VideoDecControls *)iVideoCtrls, mode);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nuint32 PVM4VDecoder::GetVideoTimestamp(void)\n{\n    return PVGetVideoTimeStamp((VideoDecControls *)iVideoCtrls);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nbool PVM4VDecoder::IsIFrame(void)\n{\n    return IsIntraFrame((VideoDecControls *)iVideoCtrls) ? true : false;\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::DecPostProcess(uint8 *YUV)\n{\n    PVDecPostProcess((VideoDecControls *)iVideoCtrls, (uint8 *) YUV);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nuint8* PVM4VDecoder::GetDecOutputFrame(void)\n{\n    PVDecPostProcess((VideoDecControls *)iVideoCtrls, NULL);\n    return (uint8 *) PVGetDecOutputFrame((VideoDecControls *)iVideoCtrls);\n}\n\n/////////////////////////////////////////////////////////////////////////////\nbool PVM4VDecoder::ResetVideoDecoder(void)\n{\n    return PVResetVideoDecoder((VideoDecControls *)iVideoCtrls) ? 
true : false;\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::DecSetReference(uint8 *refYUV, uint32 timestamp)\n{\n    PVDecSetReference((VideoDecControls *)iVideoCtrls, (uint8*)refYUV, timestamp);\n    return ;\n}\n\n/////////////////////////////////////////////////////////////////////////////\nvoid PVM4VDecoder::DecSetEnhReference(uint8 *refYUV, uint32 timestamp)\n{\n    PVDecSetEnhReference((VideoDecControls *)iVideoCtrls, (uint8*)refYUV, timestamp);\n    return ;\n}\n/////////////////////////////////////////////////////////////////////////////\nuint32 PVM4VDecoder::GetDecBitrate(void)\n{\n    return ((uint32)PVGetDecBitrate((VideoDecControls *)iVideoCtrls));\n}\n\n/////////////////////////////////////////////////////////////////////////////\nuint32 PVM4VDecoder::GetProfileAndLevel(void)\n{\n    VolInfo iVolInfo;\n    if (GetVolInfo(&iVolInfo))\n    {\n        return iVolInfo.profile_level_id;\n    }\n    else\n    {\n        return 0;\n    }\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/pvm4vdecoder_factory.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/**\n * @file pvm4vdecoder_factory.cpp\n * @brief Singleton factory for PVM4VDecoder\n */\n\n#include \"oscl_base.h\"\n\n#include \"pvm4vdecoder.h\"\n#include \"pvm4vdecoder_factory.h\"\n\n#include \"oscl_error_codes.h\"\n#include \"oscl_exception.h\"\n\n// Use default DLL entry point\n#include \"oscl_dll.h\"\n\nOSCL_DLL_ENTRY_POINT_DEFAULT()\n\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF PVVideoDecoderInterface* PVM4VDecoderFactory::CreatePVM4VDecoder()\n{\n    PVVideoDecoderInterface* videodec = NULL;\n    videodec = PVM4VDecoder::New();\n    if (videodec == NULL)\n    {\n        OSCL_LEAVE(OsclErrNoMemory);\n    }\n    return videodec;\n}\n\n////////////////////////////////////////////////////////////////////////////\nOSCL_EXPORT_REF bool PVM4VDecoderFactory::DeletePVM4VDecoder(PVVideoDecoderInterface* aVideoDec)\n{\n    if (aVideoDec)\n    {\n        OSCL_DELETE(aVideoDec);\n        return true;\n    }\n\n    return false;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/scaling.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    extern const int32 scale[63];\n\n#define PV_GET_ROW(a,b) ((a)/(b))\n\n    /*----------------------------------------------------------------------------\n    ; SIMPLE TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; ENUMERATED TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; STRUCTURES TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; GLOBAL FUNCTION DEFINITIONS\n    ; Function Prototype declaration\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; END\n    ----------------------------------------------------------------------------*/\n#ifdef __cplusplus\n}\n#endif\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/scaling_tab.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n\n#include    \"mp4dec_api.h\"\n#include    \"mp4def.h\"\n#include    \"scaling.h\"\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. 
Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\n\n/* this scaling can be used for dividing values up to 3292             07/10/01 */\nconst int32 scale[63] = {0, 262145, 131073, 87382, 65537, 52430, 43692, 37450, 32769, 29128,\n                         26215, 23832, 21846, 20166, 18726, 17477, 16385, 15421, 14565, 13798,\n                         13108, 12484, 11917, 11399, 10924, 10487, 10083, 9710, 9363, 9040,\n                         8739, 8457, 8193, 7945, 7711, 7491, 7283, 7086, 6900, 6723, 6555, 6395,\n                         6243, 6097, 5959, 5826, 5700, 5579, 5462, 5351, 5244, 5141, 5042, 4947, 4856,\n                         4767, 4682, 4600, 4521, 4444, 4370, 4298, 4229\n                        };\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; FUNCTION 
CODE\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Define all local variables\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Function body here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Return nothing or data or data pointer\n----------------------------------------------------------------------------*/\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vlc_dec_tab.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n*     -------------------------------------------------------------------   *\n*                    MPEG-4 Simple Profile Video Decoder                    *\n*     -------------------------------------------------------------------   *\n*\n* This software module was originally developed by\n*\n*   Paulo Nunes (IST / ACTS-MoMuSyS)\n*\n* and edited by\n*\n*   Robert Danielsen (Telenor / ACTS-MoMuSyS)\n*\n* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n* This software module is an implementation of a part of one or more MPEG-4\n* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n* license to this software module or modifications thereof for use in hardware\n* or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* Those intending to use this software module in hardware or software products\n* are advised that its use may infringe existing patents. 
The original\n* developer of this software module and his/her company, the subsequent\n* editors and their companies, and ISO/IEC have no liability for use of this\n* software module or modifications thereof in an implementation. Copyright is\n* not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming\n* products.\n*\n* ACTS-MoMuSys partners retain full right to use the code for his/her own\n* purpose, assign or donate the code to a third party and to inhibit third\n* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard\n* conforming products. This copyright notice must be included in all copies or\n* derivative works.\n*\n* Copyright (c) 1996\n*\n*****************************************************************************\n***********************************************************HeaderBegin*******\n*\n* File: vlc_dec_tab.h\n*\n* Author:   Paulo Nunes (IST) - Paulo.Nunes@it.ist.utl.pt\n* Created:  1-Mar-96\n*\n* Description: This file contains the VLC tables for module which deals\n*       with VLC decoding.\n*\n* Notes:    This file was created based on tmndecode\n*       Written by Karl Olav Lillevold <kol@nta.no>,\n*       1995 Telenor R&D.\n*       Donated to the Momusys-project as background code by\n*       Telenor.\n*\n*       based on mpeg2decode, (C) 1994, MPEG Software Simulation Group\n*       and mpeg2play, (C) 1994 Stefan Eckart\n*                         <stefan@lis.e-technik.tu-muenchen.de>\n*\n*\n* Modified:  9-May-96 Paulo Nunes: Reformatted. 
New headers.\n*       14-May-96 Paulo Nunes: Changed TMNMVtabs according to VM2.1.\n*   04.11.96 Robert Danielsen: Added three new tables for coding\n*           of Intra luminance coefficients (VM 4.0)\n*      01.05.97 Luis Ducla-Soares: added VM7.0 Reversible VLC tables (RVLC).\n*      13.05.97 Minhua Zhou: added VlC tables for CBPYtab2 CBPYtab3,\n*   revised  CBPYtab\n*\n***********************************************************HeaderEnd*********\n\nThis module is a header file for \"vlc_decode.c\".  The table data actually\nresides in \"vlc_tab.c\".\n\n\n------------------------------------------------------------------------------\n*/\n\n/*----------------------------------------------------------------------------\n; CONTINUE ONLY IF NOT ALREADY DEFINED\n----------------------------------------------------------------------------*/\n#ifndef vlc_dec_tab_H\n#define vlc_dec_tab_H\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n#include \"mp4def.h\"\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    extern const VLCshorttab PV_TMNMVtab0[];\n\n    extern const VLCshorttab PV_TMNMVtab1[];\n\n    extern const VLCshorttab PV_TMNMVtab2[];\n\n    extern const 
VLCshorttab PV_MCBPCtab[];\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    extern const VLCshorttab PV_MCBPCtab1[];\n#endif\n    extern const VLCshorttab PV_MCBPCtabintra[];\n\n    /* Table for separate mode MCBPC, for coding DQUANT-flag and CBPC */\n\n    extern const VLCshorttab MCBPCtab_sep[32];\n\n    extern const VLCshorttab PV_CBPYtab[48];\n\n    extern const VLCshorttab CBPYtab2[16];\n\n    extern const VLCshorttab CBPYtab3[64];\n\n    extern const VLCtab2 PV_DCT3Dtab0[];\n\n\n    extern const VLCtab2 PV_DCT3Dtab1[];\n\n\n    extern const VLCtab2 PV_DCT3Dtab2[];\n\n    /* New tables for Intra luminance blocks */\n\n    extern const VLCtab2 PV_DCT3Dtab3[];\n\n    extern const VLCtab2 PV_DCT3Dtab4[];\n\n    extern const VLCtab2 PV_DCT3Dtab5[];\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    /* Annex I tables */\n    extern const VLCtab2 PV_DCT3Dtab6[];\n\n    extern const VLCtab2 PV_DCT3Dtab7[];\n\n    extern const VLCtab2 PV_DCT3Dtab8[];\n#endif\n    /* RVLC tables */\n    extern const int ptrRvlcTab[];\n\n    extern const VLCtab2 RvlcDCTtabIntra[];\n\n    extern const VLCtab2 RvlcDCTtabInter[];\n\n    /*----------------------------------------------------------------------------\n    ; SIMPLE TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n\n    /*----------------------------------------------------------------------------\n    ; ENUMERATED TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; STRUCTURES TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; GLOBAL FUNCTION DEFINITIONS\n    ; Function Prototype declaration\n    ----------------------------------------------------------------------------*/\n\n\n    
/*----------------------------------------------------------------------------\n    ; END\n    ----------------------------------------------------------------------------*/\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n\n\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vlc_decode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n*     -------------------------------------------------------------------       *\n*                    MPEG-4 Simple Profile Video Decoder                        *\n*     -------------------------------------------------------------------       *\n*\n* This software module was originally developed by\n*\n*   Paulo Nunes (IST / ACTS-MoMuSyS)\n*   Robert Danielsen (Telenor / ACTS-MoMuSyS)\n*\n* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n* This software module is an implementation of a part of one or more MPEG-4\n* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n* license to this software module or modifications thereof for use in hardware\n* or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* Those intending to use this software module in hardware or software products\n* are advised that its use may infringe existing patents. 
The original\n* developer of this software module and his/her company, the subsequent\n* editors and their companies, and ISO/IEC have no liability for use of this\n* software module or modifications thereof in an implementation. Copyright is\n* not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming\n* products.\n*\n* ACTS-MoMuSys partners retain full right to use the code for his/her own\n* purpose, assign or donate the code to a third party and to inhibit third\n* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard\n* conforming products. This copyright notice must be included in all copies or\n* derivative works.\n*\n* Copyright (c) 1996\n*\n*****************************************************************************/\n\n/***********************************************************HeaderBegin*******\n*\n* File: vlc_dec.c\n*\n* Author:   Paulo Nunes (IST) - Paulo.Nunes@lx.it.pt\n* Created:  1-Mar-96\n*\n* Description: This file contains the VLC functions needed to decode a\n*       bitstream.\n*\n* Notes:\n*       The functions contained in this file were adapted from\n*       tmndecode\n*       Written by Karl Olav Lillevold <kol@nta.no>,\n*       1995 Telenor R&D.\n*       Donated to the Momusys-project as background code by\n*       Telenor.\n*\n*       based on mpeg2decode, (C) 1994, MPEG Software Simulation Group\n*       and mpeg2play, (C) 1994 Stefan Eckart\n*                   <stefan@lis.e-technik.tu-muenchen.de>\n*\n*\n* Modified: 9-May-96 Paulo Nunes: Reformatted. 
New headers.\n*              17-Jan-97 Jan De Lameillieure (HHI) : corrected in\n*              01.05.97 Luis Ducla-Soares: added RvlcDecTCOEF() to allow decoding\n*                                          of Reversible VLCs.\n*       09.03.98 Paulo Nunes: Cleaning.\n*\n***********************************************************HeaderEnd*********/\n\n#include \"mp4dec_lib.h\"\n#include \"vlc_dec_tab.h\"\n#include \"vlc_decode.h\"\n#include \"bitstream.h\"\n#include \"max_level.h\"\n\n\n/* ====================================================================== /\n    Function : DecodeUserData()\n    Date     : 04/10/2000\n    History  :\n    Modified : 04/16/2001 : removed status checking of PV_BitstreamFlushBits\n\n        This is simply a realization of the user_data() function\n        in the ISO/IEC 14496-2 manual.\n/ ====================================================================== */\nPV_STATUS DecodeUserData(BitstreamDecVideo *stream)\n{\n    PV_STATUS status;\n    uint32 code;\n\n    BitstreamReadBits32HC(stream);\n    BitstreamShowBits32(stream, 24, &code);\n\n    while (code != 1)\n    {\n        /* Discard user data for now.   
04/05/2000 */\n        BitstreamReadBits16(stream, 8);\n        BitstreamShowBits32(stream, 24, &code);\n        status = BitstreamCheckEndBuffer(stream);\n        if (status == PV_END_OF_VOP) return status;    /*  03/19/2002 */\n    }\n    return PV_SUCCESS;\n}\n\n\n\n/***********************************************************CommentBegin******\n*\n*       3/10/00  : initial modification to the\n*                new PV-Decoder Lib format.\n*       3/29/00  : added return code check to some functions and\n*                optimize the code.\n*\n***********************************************************CommentEnd********/\nPV_STATUS PV_GetMBvectors(VideoDecData *video, uint mode)\n{\n    PV_STATUS status;\n    BitstreamDecVideo *stream = video->bitstream;\n    int  f_code_f = video->currVop->fcodeForward;\n    int  vlc_code_mag;\n\n\n    MOT *mot_x = video->motX;\n    MOT *mot_y = video->motY;\n\n    int k, offset;\n    int x_pos = video->mbnum_col;\n    int y_pos = video->mbnum_row;\n    int doubleWidth = video->nMBPerRow << 1;\n    int pos = (x_pos + y_pos * doubleWidth) << 1;\n    MOT mvx = 0, mvy = 0;\n\n\n    if (f_code_f == 1)\n    {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)\n#else\n        if (mode == MODE_INTER4V)\n#endif\n        {\n            for (k = 0; k < 4; k++)\n            {\n                offset = (k & 1) + (k >> 1) * doubleWidth;\n                mv_prediction(video, k, &mvx, &mvy);\n                /* decode component x */\n                status = PV_VlcDecMV(stream, &vlc_code_mag);\n                if (status != PV_SUCCESS)\n                {\n                    return status;\n                }\n\n                mvx += (MOT)vlc_code_mag;\n                mvx = (MOT)(((mvx + 32) & 0x3F) - 32);\n\n\n                status = PV_VlcDecMV(stream, &vlc_code_mag);\n                if (status != PV_SUCCESS)\n                {\n                    return status;\n                }\n\n               
 mvy += (MOT)vlc_code_mag;\n                mvy = (MOT)(((mvy + 32) & 0x3F) - 32);\n\n                mot_x[pos+offset] = (MOT) mvx;\n                mot_y[pos+offset] = (MOT) mvy;\n            }\n        }\n        else\n        {\n            mv_prediction(video, 0, &mvx, &mvy);\n            /* For PVOPs, field  appears only in MODE_INTER & MODE_INTER_Q */\n            status = PV_VlcDecMV(stream, &vlc_code_mag);\n            if (status != PV_SUCCESS)\n            {\n                return status;\n            }\n\n            mvx += (MOT)vlc_code_mag;\n            mvx = (MOT)(((mvx + 32) & 0x3F) - 32);\n\n\n            status = PV_VlcDecMV(stream, &vlc_code_mag);\n            if (status != PV_SUCCESS)\n            {\n                return status;\n            }\n\n\n            mvy += (MOT)vlc_code_mag;\n            mvy = (MOT)(((mvy + 32) & 0x3F) - 32);\n\n\n            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;\n            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;\n            pos += doubleWidth;\n            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;\n            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;\n        }\n    }\n    else\n    {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (mode == MODE_INTER4V || mode == MODE_INTER4V_Q)\n#else\n        if (mode == MODE_INTER4V)\n#endif\n        {\n            for (k = 0; k < 4; k++)\n            {\n                offset = (k & 1) + (k >> 1) * doubleWidth;\n                mv_prediction(video, k, &mvx, &mvy);\n                status = PV_DecodeMBVec(stream, &mvx, &mvy, f_code_f);\n                mot_x[pos+offset] = (MOT) mvx;\n                mot_y[pos+offset] = (MOT) mvy;\n                if (status != PV_SUCCESS)\n                {\n                    return status;\n                }\n            }\n        }\n        else\n        {\n            mv_prediction(video, 0, &mvx, &mvy);\n            /* For PVOPs, field  appears only in MODE_INTER & MODE_INTER_Q */\n            status = PV_DecodeMBVec(stream, &mvx, &mvy, 
f_code_f);\n            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;\n            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;\n            pos += doubleWidth;\n            mot_x[pos] = mot_x[pos+1] = (MOT) mvx;\n            mot_y[pos] = mot_y[pos+1] = (MOT) mvy;\n            if (status != PV_SUCCESS)\n            {\n                return status;\n            }\n        }\n    }\n    return PV_SUCCESS;\n}\n\n\n/***********************************************************CommentBegin******\n*       3/10/00  : initial modification to the\n*                new PV-Decoder Lib format.\n*       3/29/00  : added return code check to some functions\n*       5/10/00  : check whether the decoded vector is legal.\n*       4/17/01  : use MOT type\n***********************************************************CommentEnd********/\nPV_STATUS PV_DecodeMBVec(BitstreamDecVideo *stream, MOT *mv_x, MOT *mv_y, int f_code_f)\n{\n    PV_STATUS status;\n    int  vlc_code_magx, vlc_code_magy;\n    int  residualx = 0, residualy = 0;\n\n    /* decode component x */\n    status = PV_VlcDecMV(stream, &vlc_code_magx);\n    if (status != PV_SUCCESS)\n    {\n        return status;\n    }\n\n    if (vlc_code_magx)\n    {\n        residualx = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));\n    }\n\n\n    /* decode component y */\n    status = PV_VlcDecMV(stream, &vlc_code_magy);\n    if (status != PV_SUCCESS)\n    {\n        return status;\n    }\n\n    if (vlc_code_magy)\n    {\n        residualy = (int) BitstreamReadBits16_INLINE(stream, (int)(f_code_f - 1));\n    }\n\n\n    if (PV_DeScaleMVD(f_code_f, residualx, vlc_code_magx, mv_x) != PV_SUCCESS)\n    {\n        return PV_FAIL;\n    }\n\n    if (PV_DeScaleMVD(f_code_f, residualy, vlc_code_magy, mv_y) != PV_SUCCESS)\n    {\n        return PV_FAIL;\n    }\n\n    return PV_SUCCESS;\n}\n\n\n/***********************************************************CommentBegin******\n*       3/31/2000 : initial modification to the new PV-Decoder Lib format.\n*   
    5/10/2000 : check to see if the decoded vector falls within\n*                           the legal fcode range.\n*\n***********************************************************CommentEnd********/\nPV_STATUS PV_DeScaleMVD(\n    int  f_code,       /* <-- MV range in 1/2 units: 1=32,2=64,...,7=2048     */\n    int  residual,     /* <-- part of the MV Diff. FLC coded                  */\n    int  vlc_code_mag, /* <-- part of the MV Diff. VLC coded                  */\n    MOT  *vector       /* --> Obtained MV component in 1/2 units              */\n)\n{\n    int   half_range = (1 << (f_code + 4));\n    int   mask = (half_range << 1) - 1;\n    int   diff_vector;\n\n\n    if (vlc_code_mag == 0)\n    {\n        diff_vector = vlc_code_mag;\n    }\n    else\n    {\n        diff_vector = ((PV_ABS(vlc_code_mag) - 1) << (f_code - 1)) + residual + 1;\n        if (vlc_code_mag < 0)\n        {\n            diff_vector = -diff_vector;\n        }\n    }\n\n    *vector += (MOT)(diff_vector);\n\n    *vector = (MOT)((*vector + half_range) & mask) - half_range;\n\n    return PV_SUCCESS;\n}\n\n\n\nvoid mv_prediction(\n    VideoDecData *video,\n    int block,\n    MOT *mvx,\n    MOT *mvy\n)\n{\n    /*----------------------------------------------------------------------------\n    ; Define all local variables\n    ----------------------------------------------------------------------------*/\n    MOT *motxdata = video->motX;\n    MOT *motydata = video->motY;\n    int mbnum_col = video->mbnum_col;\n    int mbnum_row = video->mbnum_row;\n    uint8 *slice_nb = video->sliceNo;\n    int nMBPerRow = video->nMBPerRow;\n    int nMVPerRow = nMBPerRow << 1;\n    int mbnum = video->mbnum;\n    int p1x = 0, p2x = 0, p3x = 0;\n    int p1y = 0, p2y = 0, p3y = 0;\n    int rule1 = 0, rule2 = 0, rule3 = 0;\n    int     indx;\n\n    indx = ((mbnum_col << 1) + (block & 1)) + ((mbnum_row << 1)  + (block >> 1)) * nMVPerRow - 1; /* left block */\n\n    if (block & 1)           /* block 1, 3 */\n    {\n     
   p1x = motxdata[indx];\n        p1y = motydata[indx];\n        rule1 = 1;\n    }\n    else                    /* block 0, 2 */\n    {\n        if (mbnum_col > 0 && slice_nb[mbnum] == slice_nb[mbnum-1])\n        {\n            p1x = motxdata[indx];\n            p1y = motydata[indx];\n            rule1 = 1;\n        }\n    }\n\n    indx = indx + 1 - nMVPerRow; /* upper_block */\n    if (block >> 1)\n    {\n        indx -= (block & 1);\n        p2x = motxdata[indx];\n        p2y = motydata[indx];\n        p3x = motxdata[indx + 1];\n        p3y = motydata[indx + 1];\n        rule2 = rule3 = 1;\n    }\n    else\n    {                           /* block 0,1 */\n        if (mbnum_row)\n        {\n            if (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])\n            {\n                p2x = motxdata[indx];\n                p2y = motydata[indx];\n                rule2 = 1;\n            }\n            if (mbnum_col < nMBPerRow - 1 && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow+1])\n            {\n                indx = indx + 2 - (block & 1);\n                p3x = motxdata[indx];\n                p3y = motydata[indx];\n                rule3 = 1;\n            }\n        }\n    }\n\n    if (rule1 + rule2 + rule3 > 1)\n    {\n        *mvx = (MOT)PV_MEDIAN(p1x, p2x, p3x);\n        *mvy = (MOT)PV_MEDIAN(p1y, p2y, p3y);\n    }\n    else if (rule1 + rule2 + rule3 == 1)\n    {\n        /* two of three are zero */\n        *mvx = (MOT)(p1x + p2x + p3x);\n        *mvy = (MOT)(p1y + p2y + p3y);\n    }\n    else\n    {\n        /* all MBs are outside the VOP */\n        *mvx = *mvy = 0;\n    }\n    /*----------------------------------------------------------------------------\n    ; Return nothing or data or data pointer\n    ----------------------------------------------------------------------------*/\n    return;\n}\n\n/***********************************************************CommentBegin******\n*\n*       3/30/2000 : initial modification to the new PV-Decoder Lib 
format.\n*       4/16/2001 : removed checking of status for PV_BitstreamFlushBits\n***********************************************************CommentEnd********/\n\nPV_STATUS PV_VlcDecMV(BitstreamDecVideo *stream, int *mv)\n{\n    PV_STATUS status = PV_SUCCESS;\n    uint code;\n\n    BitstreamShow13Bits(stream, &code);\n\n    if (code >> 12)\n    {\n        *mv = 0; /* Vector difference = 0 */\n        PV_BitstreamFlushBits(stream, 1);\n        return PV_SUCCESS;\n    }\n\n    if (code >= 512)\n    {\n        code = (code >> 8) - 2;\n        PV_BitstreamFlushBits(stream, PV_TMNMVtab0[code].len + 1);\n        *mv = PV_TMNMVtab0[code].val;\n        return status;\n    }\n\n    if (code >= 128)\n    {\n        code = (code >> 2) - 32;\n        PV_BitstreamFlushBits(stream, PV_TMNMVtab1[code].len + 1);\n        *mv = PV_TMNMVtab1[code].val;\n        return status;\n    }\n\n    if (code < 4)\n    {\n        *mv = -1;\n        return PV_FAIL;\n    }\n\n    code -= 4;\n\n    PV_BitstreamFlushBits(stream, PV_TMNMVtab2[code].len + 1);\n\n    *mv = PV_TMNMVtab2[code].val;\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*       3/30/2000 : initial modification to the new PV-Decoder Lib\n*                           format and the change of error-handling method.\n*       4/16/01   : removed status checking of PV_BitstreamFlushBits\n***********************************************************CommentEnd********/\n\nint PV_VlcDecMCBPC_com_intra(BitstreamDecVideo *stream)\n{\n    uint code;\n\n    BitstreamShowBits16(stream, 9, &code);\n\n\n    if (code < 8)\n    {\n        return VLC_CODE_ERROR;\n    }\n\n    code >>= 3;\n\n    if (code >= 32)\n    {\n        PV_BitstreamFlushBits(stream, 1);\n        return 3;\n    }\n\n    PV_BitstreamFlushBits(stream, PV_MCBPCtabintra[code].len);\n\n    return PV_MCBPCtabintra[code].val;\n}\n\n\n/***********************************************************CommentBegin******\n*\n*       
3/30/2000 : initial modification to the new PV-Decoder Lib\n*                           format and the change of error-handling method.\n*       4/16/2001 : removed checking of return status of PV_BitstreamFlushBits\n***********************************************************CommentEnd********/\n\nint PV_VlcDecMCBPC_com_inter(BitstreamDecVideo *stream)\n{\n    uint code;\n\n    BitstreamShowBits16(stream, 9, &code);\n\n    if (code == 0)\n    {\n        return VLC_CODE_ERROR;\n    }\n    else if (code >= 256)\n    {\n        PV_BitstreamFlushBits(stream, 1);\n        return 0;\n    }\n\n    PV_BitstreamFlushBits(stream, PV_MCBPCtab[code].len);\n    return PV_MCBPCtab[code].val;\n}\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\nint PV_VlcDecMCBPC_com_inter_H263(BitstreamDecVideo *stream)\n{\n    uint code;\n\n    BitstreamShow13Bits(stream, &code);\n\n    if (code == 0)\n    {\n        return VLC_CODE_ERROR;\n    }\n    else if (code >= 4096)\n    {\n        PV_BitstreamFlushBits(stream, 1);\n        return 0;\n    }\n    if (code >= 16)\n    {\n        PV_BitstreamFlushBits(stream, PV_MCBPCtab[code >> 4].len);\n        return PV_MCBPCtab[code >> 4].val;\n    }\n    else\n    {\n        PV_BitstreamFlushBits(stream, PV_MCBPCtab1[code - 8].len);\n        return PV_MCBPCtab1[code - 8].val;\n    }\n}\n#endif\n/***********************************************************CommentBegin******\n*       3/30/2000 : initial modification to the new PV-Decoder Lib\n*                           format and the change of error-handling method.\n*       4/16/2001 : removed status checking for PV_BitstreamFlushBits\n***********************************************************CommentEnd********/\n\nint PV_VlcDecCBPY(BitstreamDecVideo *stream, int intra)\n{\n    int CBPY = 0;\n    uint code;\n\n    BitstreamShowBits16(stream, 6, &code);\n\n\n    if (code < 2)\n    {\n        return -1;\n    }\n    else if (code >= 48)\n    {\n        PV_BitstreamFlushBits(stream, 2);\n        CBPY = 15;\n    }\n    
else\n    {\n        PV_BitstreamFlushBits(stream, PV_CBPYtab[code].len);\n        CBPY = PV_CBPYtab[code].val;\n    }\n\n    if (intra == 0) CBPY = 15 - CBPY;\n    CBPY = CBPY & 15;\n    return CBPY;\n}\n\n\n/***********************************************************CommentBegin******\n*       3/31/2000 : initial modification to the new PV-Decoder Lib format.\n*\n*       8/23/2000 : optimize the function by removing unnecessary BitstreamShowBits()\n*                       function calls.\n*\n*       9/6/2000 : change the API to check for end-of-buffer for proper\n*                           termination of decoding process.\n***********************************************************CommentEnd********/\nPV_STATUS PV_VlcDecIntraDCPredSize(BitstreamDecVideo *stream, int compnum, uint *DC_size)\n{\n    PV_STATUS status = PV_FAIL;      /*  07/09/01 */\n    uint  code;\n\n    *DC_size = 0;\n    if (compnum < 4)  /* luminance block */\n    {\n\n        BitstreamShowBits16(stream, 11, &code);\n\n        if (code == 1)\n        {\n            *DC_size = 12;\n            PV_BitstreamFlushBits(stream, 11);\n            return PV_SUCCESS;\n        }\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 11;\n            PV_BitstreamFlushBits(stream, 10);\n            return PV_SUCCESS;\n        }\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 10;\n            PV_BitstreamFlushBits(stream, 9);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 9;\n            PV_BitstreamFlushBits(stream, 8);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 8;\n            PV_BitstreamFlushBits(stream, 7);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 7;\n            PV_BitstreamFlushBits(stream, 6);\n          
  return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 6;\n            PV_BitstreamFlushBits(stream, 5);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 5;\n            PV_BitstreamFlushBits(stream, 4);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 4;\n            PV_BitstreamFlushBits(stream, 3);\n            return PV_SUCCESS;\n        }\n        else if (code == 2)\n        {\n            *DC_size = 3;\n            PV_BitstreamFlushBits(stream, 3);\n            return PV_SUCCESS;\n        }\n        else if (code == 3)\n        {\n            *DC_size = 0;\n            PV_BitstreamFlushBits(stream, 3);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 2)\n        {\n            *DC_size = 2;\n            PV_BitstreamFlushBits(stream, 2);\n            return PV_SUCCESS;\n        }\n        else if (code == 3)\n        {\n            *DC_size = 1;\n            PV_BitstreamFlushBits(stream, 2);\n            return PV_SUCCESS;\n        }\n    }\n    else /* chrominance block */\n    {\n\n        BitstreamShow13Bits(stream, &code);\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 12;\n            PV_BitstreamFlushBits(stream, 12);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 11;\n            PV_BitstreamFlushBits(stream, 11);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 10;\n            PV_BitstreamFlushBits(stream, 10);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 9;\n            PV_BitstreamFlushBits(stream, 9);\n            return PV_SUCCESS;\n        
}\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 8;\n            PV_BitstreamFlushBits(stream, 8);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 7;\n            PV_BitstreamFlushBits(stream, 7);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 6;\n            PV_BitstreamFlushBits(stream, 6);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 5;\n            PV_BitstreamFlushBits(stream, 5);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 4;\n            PV_BitstreamFlushBits(stream, 4);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        if (code == 1)\n        {\n            *DC_size = 3;\n            PV_BitstreamFlushBits(stream, 3);\n            return PV_SUCCESS;\n        }\n\n        code >>= 1;\n        {\n            *DC_size = (int)(3 - code);\n            PV_BitstreamFlushBits(stream, 2);\n            return PV_SUCCESS;\n        }\n    }\n\n    return status;\n}\n\n/***********************************************************CommentBegin******\n*\n*\n*       3/30/2000 : initial modification to the new PV-Decoder Lib\n*                           format and the change of error-handling method.\n*\n***********************************************************CommentEnd********/\n\n\n\nPV_STATUS VlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, &code);\n\n    /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */\n    /*  if(GetTcoeffIntra(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n    if (code >= 1024)\n    {\n        tab = &PV_DCT3Dtab3[(code >> 6) - 
16];\n    }\n    else\n    {\n        if (code >= 256)\n        {\n            tab = &PV_DCT3Dtab4[(code >> 3) - 32];\n        }\n        else\n        {\n            if (code >= 16)\n            {\n                tab = &PV_DCT3Dtab5[(code>>1) - 8];\n            }\n            else\n            {\n                return PV_FAIL;\n            }\n        }\n    }\n\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint) tab->run; //(tab->val >> 8) & 255;\n    pTcoef->level = (int) tab->level; //tab->val & 255;\n    pTcoef->last = (uint) tab->last; //(tab->val >> 16) & 1;\n\n\n    /* the following is modified for 3-mode escape -- boon */\n    if (tab->level != 0xFF)\n    {\n        return PV_SUCCESS;\n    }\n\n    //if (((tab->run<<8)|(tab->level)|(tab->last<<16)) == VLC_ESCAPE_CODE)\n\n    if (!pTcoef->sign)\n    {\n        /* first escape mode. level is offset */\n        BitstreamShow13Bits(stream, &code);\n\n        /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */\n        /*          if(GetTcoeffIntra(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n        if (code >= 1024)\n        {\n            tab = &PV_DCT3Dtab3[(code >> 6) - 16];\n        }\n        else\n        {\n            if (code >= 256)\n            {\n                tab = &PV_DCT3Dtab4[(code >> 3) - 32];\n            }\n            else\n            {\n                if (code >= 16)\n                {\n                    tab = &PV_DCT3Dtab5[(code>>1) - 8];\n                }\n                else\n                {\n                    return PV_FAIL;\n                }\n            }\n        }\n\n        PV_BitstreamFlushBits(stream, tab->len + 1);\n\n        /* sign bit */\n        pTcoef->sign = (code >> (12 - tab->len)) & 1;\n        pTcoef->run = (uint)tab->run; //(tab->val >> 8) & 255;\n        pTcoef->level = (int)tab->level; //tab->val & 255;\n        
pTcoef->last = (uint)tab->last; //(tab->val >> 16) & 1;\n\n\n        /* need to add back the max level */\n        if ((pTcoef->last == 0 && pTcoef->run > 14) || (pTcoef->last == 1 && pTcoef->run > 20))\n        {\n            return PV_FAIL;\n        }\n        pTcoef->level = pTcoef->level + intra_max_level[pTcoef->last][pTcoef->run];\n\n\n    }\n    else\n    {\n        uint run_offset;\n        run_offset = BitstreamRead1Bits_INLINE(stream);\n\n        if (!run_offset)\n        {\n            /* second escape mode. run is offset */\n            BitstreamShow13Bits(stream, &code);\n\n            /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFIntra */\n            /*              if(GetTcoeffIntra(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n            if (code >= 1024)\n            {\n                tab = &PV_DCT3Dtab3[(code >> 6) - 16];\n            }\n            else\n            {\n                if (code >= 256)\n                {\n                    tab = &PV_DCT3Dtab4[(code >> 3) - 32];\n                }\n                else\n                {\n                    if (code >= 16)\n                    {\n                        tab = &PV_DCT3Dtab5[(code>>1) - 8];\n                    }\n                    else\n                    {\n                        return PV_FAIL;\n                    }\n                }\n            }\n\n            PV_BitstreamFlushBits(stream, tab->len + 1);\n            /* sign bit */\n            pTcoef->sign = (code >> (12 - tab->len)) & 1;\n            pTcoef->run = (uint)tab->run; //(tab->val >> 8) & 255;\n            pTcoef->level = (int)tab->level; //tab->val & 255;\n            pTcoef->last = (uint)tab->last; //(tab->val >> 16) & 1;\n\n\n\n            /* need to add back the max run */\n            if (pTcoef->last)\n            {\n                if (pTcoef->level > 8)\n                {\n                    return PV_FAIL;\n                }\n          
      pTcoef->run = pTcoef->run + intra_max_run1[pTcoef->level] + 1;\n            }\n            else\n            {\n                if (pTcoef->level > 27)\n                {\n                    return PV_FAIL;\n                }\n                pTcoef->run = pTcoef->run + intra_max_run0[pTcoef->level] + 1;\n            }\n\n\n        }\n        else\n        {\n\n            code = BitstreamReadBits16_INLINE(stream, 8);\n            pTcoef->last = code >> 7;\n            pTcoef->run = (code >> 1) & 0x3F;\n            pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);\n\n            if (pTcoef->level >= 2048)\n            {\n                pTcoef->sign = 1;\n                pTcoef->level = 4096 - pTcoef->level;\n            }\n            else\n            {\n                pTcoef->sign = 0;\n            }\n        } /* flc */\n    }\n\n    return PV_SUCCESS;\n\n} /* VlcDecTCOEFIntra */\n\nPV_STATUS VlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, &code);\n\n    /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */\n    /*  if(GetTcoeffInter(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n    if (code >= 1024)\n    {\n        tab = &PV_DCT3Dtab0[(code >> 6) - 16];\n    }\n    else\n    {\n        if (code >= 256)\n        {\n            tab = &PV_DCT3Dtab1[(code >> 3) - 32];\n        }\n        else\n        {\n            if (code >= 16)\n            {\n                tab = &PV_DCT3Dtab2[(code>>1) - 8];\n            }\n            else\n            {\n                return PV_FAIL;\n            }\n        }\n    }\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint)tab->run;     //(tab->val >> 4) & 255;\n    pTcoef->level = (int)tab->level; //tab->val & 15;\n    pTcoef->last = (uint)tab->last;   //(tab->val >> 12) & 1;\n\n 
   /* the following is modified for 3-mode escape -- boon */\n    if (tab->run != 0xBF)\n    {\n        return PV_SUCCESS;\n    }\n    //if (((tab->run<<4)|(tab->level)|(tab->last<<12)) == VLC_ESCAPE_CODE)\n\n\n    if (!pTcoef->sign)\n    {\n        /* first escape mode. level is offset */\n        BitstreamShow13Bits(stream, &code);\n\n        /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */\n        /*          if(GetTcoeffInter(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n        if (code >= 1024)\n        {\n            tab = &PV_DCT3Dtab0[(code >> 6) - 16];\n        }\n        else\n        {\n            if (code >= 256)\n            {\n                tab = &PV_DCT3Dtab1[(code >> 3) - 32];\n            }\n            else\n            {\n                if (code >= 16)\n                {\n                    tab = &PV_DCT3Dtab2[(code>>1) - 8];\n                }\n                else\n                {\n                    return PV_FAIL;\n                }\n            }\n        }\n        PV_BitstreamFlushBits(stream, tab->len + 1);\n        pTcoef->sign = (code >> (12 - tab->len)) & 1;\n        pTcoef->run = (uint)tab->run;     //(tab->val >> 4) & 255;\n        pTcoef->level = (int)tab->level; //tab->val & 15;\n        pTcoef->last = (uint)tab->last;   //(tab->val >> 12) & 1;\n\n        /* need to add back the max level */\n        if ((pTcoef->last == 0 && pTcoef->run > 26) || (pTcoef->last == 1 && pTcoef->run > 40))\n        {\n            return PV_FAIL;\n        }\n        pTcoef->level = pTcoef->level + inter_max_level[pTcoef->last][pTcoef->run];\n    }\n    else\n    {\n        uint run_offset;\n        run_offset = BitstreamRead1Bits_INLINE(stream);\n\n        if (!run_offset)\n        {\n            /* second escape mode. 
run is offset */\n            BitstreamShow13Bits(stream, &code);\n\n            /* 10/17/2000, perform a little bit better on ARM by putting the whole function in VlcDecTCOEFFInter */\n            /*if(GetTcoeffInter(code,pTcoef,&tab,stream)!=PV_SUCCESS) return status;*/\n            if (code >= 1024)\n            {\n                tab = &PV_DCT3Dtab0[(code >> 6) - 16];\n            }\n            else\n            {\n                if (code >= 256)\n                {\n                    tab = &PV_DCT3Dtab1[(code >> 3) - 32];\n                }\n                else\n                {\n                    if (code >= 16)\n                    {\n                        tab = &PV_DCT3Dtab2[(code>>1) - 8];\n                    }\n                    else\n                    {\n                        return PV_FAIL;\n                    }\n                }\n            }\n            PV_BitstreamFlushBits(stream, tab->len + 1);\n            pTcoef->sign = (code >> (12 - tab->len)) & 1;\n            pTcoef->run = (uint)tab->run;     //(tab->val >> 4) & 255;\n            pTcoef->level = (int)tab->level; //tab->val & 15;\n            pTcoef->last = (uint)tab->last;   //(tab->val >> 12) & 1;\n\n            /* need to add back the max run */\n            if (pTcoef->last)\n            {\n                if (pTcoef->level > 3)\n                {\n                    return PV_FAIL;\n                }\n                pTcoef->run = pTcoef->run + inter_max_run1[pTcoef->level] + 1;\n            }\n            else\n            {\n                if (pTcoef->level > 12)\n                {\n                    return PV_FAIL;\n                }\n                pTcoef->run = pTcoef->run + inter_max_run0[pTcoef->level] + 1;\n            }\n        }\n        else\n        {\n\n            code = BitstreamReadBits16_INLINE(stream, 8);\n            pTcoef->last = code >> 7;\n            pTcoef->run = (code >> 1) & 0x3F;\n            pTcoef->level = 
(int)(BitstreamReadBits16_INLINE(stream, 13) >> 1);\n\n\n\n            if (pTcoef->level >= 2048)\n            {\n                pTcoef->sign = 1;\n                pTcoef->level = 4096 - pTcoef->level;\n            }\n            else\n            {\n                pTcoef->sign = 0;\n            }\n        } /* flc */\n    }\n\n    return PV_SUCCESS;\n\n} /* VlcDecTCOEFInter */\n\n/*=======================================================\n    Function:   VlcDecTCOEFShortHeader()\n    Date    :   04/27/99\n    Purpose :   New function used in decoding of video planes\n                with short header\n    Modified:   05/23/2000\n                for new decoder structure.\n=========================================================*/\nPV_STATUS VlcDecTCOEFShortHeader(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, &code);\n\n    /*intra = 0;*/\n\n    if (code >= 1024) tab = &PV_DCT3Dtab0[(code >> 6) - 16];\n    else\n    {\n        if (code >= 256) tab = &PV_DCT3Dtab1[(code >> 3) - 32];\n        else\n        {\n            if (code >= 16) tab = &PV_DCT3Dtab2[(code>>1) - 8];\n            else return PV_FAIL;\n        }\n    }\n\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint)tab->run;//(tab->val >> 4) & 255;\n    pTcoef->level = (int)tab->level;//tab->val & 15;\n    pTcoef->last = (uint)tab->last;//(tab->val >> 12) & 1;\n\n    /* the following is modified for 3-mode escape -- boon */\n    if (((tab->run << 4) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE)    /* ESCAPE */\n    {\n        return PV_SUCCESS;\n    }\n\n\n    /* escape mode 4 - H.263 type */\n    pTcoef->last = pTcoef->sign; /* Last */\n    pTcoef->run = BitstreamReadBits16_INLINE(stream, 6); /* Run */\n    pTcoef->level = (int) BitstreamReadBits16_INLINE(stream, 8); /* Level */\n\n    if (pTcoef->level == 0 || pTcoef->level == 
128)\n    {\n        return PV_FAIL;\n    }\n\n    if (pTcoef->level > 128)\n    {\n        pTcoef->sign = 1;\n        pTcoef->level = 256 - pTcoef->level;\n    }\n    else\n    {\n        pTcoef->sign = 0;\n    }\n\n\n\n    return PV_SUCCESS;\n\n}   /* VlcDecTCOEFShortHeader */\n\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\nPV_STATUS VlcDecTCOEFShortHeader_AnnexI(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, &code);\n\n    /*intra = 0;*/\n\n    if (code >= 1024) tab = &PV_DCT3Dtab6[(code >> 6) - 16];\n    else\n    {\n        if (code >= 256) tab = &PV_DCT3Dtab7[(code >> 3) - 32];\n        else\n        {\n            if (code >= 16) tab = &PV_DCT3Dtab8[(code>>1) - 8];\n            else return PV_FAIL;\n        }\n    }\n\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint)tab->run;//(tab->val >> 4) & 255;\n    pTcoef->level = (int)tab->level;//tab->val & 15;\n    pTcoef->last = (uint)tab->last;//(tab->val >> 12) & 1;\n\n    /* the following is modified for 3-mode escape -- boon */\n    if (((tab->run << 6) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE)    /* ESCAPE */\n    {\n        return PV_SUCCESS;\n    }\n    /* escape mode 4 - H.263 type */\n    pTcoef->last = pTcoef->sign; /* Last */\n    pTcoef->run = BitstreamReadBits16(stream, 6); /* Run */\n    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */\n\n    if (pTcoef->level == 0 || pTcoef->level == 128)\n    {\n        return PV_FAIL;\n    }\n\n\n    if (pTcoef->level > 128)\n    {\n        pTcoef->sign = 1;\n        pTcoef->level = 256 - pTcoef->level;\n    }\n    else pTcoef->sign = 0;\n\n\n\n    return PV_SUCCESS;\n\n}   /* VlcDecTCOEFShortHeader_AnnexI */\n\nPV_STATUS VlcDecTCOEFShortHeader_AnnexT(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, 
&code);\n\n    /*intra = 0;*/\n\n    if (code >= 1024) tab = &PV_DCT3Dtab0[(code >> 6) - 16];\n    else\n    {\n        if (code >= 256) tab = &PV_DCT3Dtab1[(code >> 3) - 32];\n        else\n        {\n            if (code >= 16) tab = &PV_DCT3Dtab2[(code>>1) - 8];\n            else return PV_FAIL;\n        }\n    }\n\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint)tab->run;//(tab->val >> 4) & 255;\n    pTcoef->level = (int)tab->level;//tab->val & 15;\n    pTcoef->last = (uint)tab->last;//(tab->val >> 12) & 1;\n\n    /* the following is modified for 3-mode escape --  */\n    if (((tab->run << 4) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE)    /* ESCAPE */\n    {\n        return PV_SUCCESS;\n    }\n    /* escape mode 4 - H.263 type */\n    pTcoef->last = pTcoef->sign; /* Last */\n    pTcoef->run = BitstreamReadBits16(stream, 6); /* Run */\n    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */\n\n    if (pTcoef->level == 0)\n    {\n        return PV_FAIL;\n    }\n\n    if (pTcoef->level >= 128)\n    {\n        pTcoef->sign = 1;\n        pTcoef->level = 256 - pTcoef->level;\n    }\n    else\n    {\n        pTcoef->sign = 0;\n    }\n\n    if (pTcoef->level == 128)\n    {\n        code = BitstreamReadBits16(stream, 11);        /* ANNEX_T */\n\n        code = (code >> 6 & 0x1F) | (code << 5 & 0x7ff);\n        if (code > 1024)\n        {\n            pTcoef->sign = 1;\n            pTcoef->level = (2048 - code);\n        }\n        else\n        {\n            pTcoef->sign = 0;\n            pTcoef->level = code;\n        }\n    }\n\n    return PV_SUCCESS;\n\n}   /* VlcDecTCOEFShortHeader */\n\n\nPV_STATUS VlcDecTCOEFShortHeader_AnnexIT(BitstreamDecVideo *stream, Tcoef *pTcoef/*, int intra*/)\n{\n    uint code;\n    const VLCtab2 *tab;\n\n    BitstreamShow13Bits(stream, &code);\n\n    /*intra = 0;*/\n\n    if (code >= 1024) tab = &PV_DCT3Dtab6[(code >> 6) - 16];\n    
else\n    {\n        if (code >= 256) tab = &PV_DCT3Dtab7[(code >> 3) - 32];\n        else\n        {\n            if (code >= 16) tab = &PV_DCT3Dtab8[(code>>1) - 8];\n            else return PV_FAIL;\n        }\n    }\n\n    PV_BitstreamFlushBits(stream, tab->len + 1);\n    pTcoef->sign = (code >> (12 - tab->len)) & 1;\n    pTcoef->run = (uint)tab->run;//(tab->val >> 4) & 255;\n    pTcoef->level = (int)tab->level;//tab->val & 15;\n    pTcoef->last = (uint)tab->last;//(tab->val >> 12) & 1;\n\n    /* the following is modified for 3-mode escape --  */\n    if (((tab->run << 6) | (tab->level) | (tab->last << 12)) != VLC_ESCAPE_CODE)    /* ESCAPE */\n    {\n        return PV_SUCCESS;\n    }\n    /* escape mode 4 - H.263 type */\n    pTcoef->last = pTcoef->sign; /* Last */\n    pTcoef->run = BitstreamReadBits16(stream, 6); /* Run */\n    pTcoef->level = (int) BitstreamReadBits16(stream, 8); /* Level */\n\n    if (pTcoef->level == 0)\n    {\n        return PV_FAIL;\n    }\n\n    if (pTcoef->level >= 128)\n    {\n        pTcoef->sign = 1;\n        pTcoef->level = 256 - pTcoef->level;\n    }\n    else\n    {\n        pTcoef->sign = 0;\n    }\n\n    if (pTcoef->level == 128)\n    {\n        code = BitstreamReadBits16(stream, 11);        /* ANNEX_T */\n\n        code = (code >> 6 & 0x1F) | (code << 5 & 0x7ff);\n        if (code > 1024)\n        {\n            pTcoef->sign = 1;\n            pTcoef->level = (2048 - code);\n        }\n        else\n        {\n            pTcoef->sign = 0;\n            pTcoef->level = code;\n        }\n    }\n\n\n    return PV_SUCCESS;\n\n}   /* VlcDecTCOEFShortHeader_AnnexI */\n#endif\n/***********************************************************CommentBegin******\n*       3/30/2000 : initial modification to the new PV-Decoder Lib\n*                           format and the change of error-handling method.\n*                           The coefficient is now returned thru a pre-\n*                           initialized parameters for 
speedup.\n*\n***********************************************************CommentEnd********/\n\n\nPV_STATUS RvlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef)\n{\n    uint code, mask;\n    const VLCtab2 *tab2;\n    int count, len, num[2] = {0, 0} /*  01/30/01 */;\n\n    mask = 0x4000;      /* mask  100000000000000   */\n    BitstreamShow15Bits(stream, &code);   /*  03/07/01 */\n\n    len = 1;\n\n    //  09/20/99 Escape mode\n    /// Bitstream Exchange\n    if (code < 2048)\n    {\n        PV_BitstreamFlushBits(stream, 5);\n        pTcoef->last = BitstreamRead1Bits_INLINE(stream);\n        pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);\n        //  09/20/99 New marker bit\n        PV_BitstreamFlushBits(stream, 1);\n        //  09/20/99 The length for LEVEL used to be 7 in the old version\n        pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);\n        //  09/20/99 Another new marker bit\n//      PV_BitstreamFlushBitsCheck(stream, 1);\n        pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1;  /* fix   3/13/01  */\n        return PV_SUCCESS;\n    }\n\n    if (code & mask)\n    {\n        count = 1;\n        while (mask && count > 0)       /* fix  3/28/01  */\n        {\n            mask = mask >> 1;\n            if (code & mask)\n                count--;\n            else\n                num[0]++; /* number of zeros in the middle */\n            len++;\n        }\n    }\n    else\n    {\n        count = 2;\n        while (mask && count > 0)           /* fix  3/28/01  */\n        {\n            mask = mask >> 1;\n            if (!(code & mask))\n                count--;\n            else\n                num[count-1]++; /* number of ones in the middle */\n            len++;\n        }\n    }\n\n    code = code & 0x7fff;\n    code = code >> (15 - (len + 1));\n\n    /*  1/30/01, add fast decoding algorithm here */\n    /* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01\n                         num[1] and num[0] x\n         
               or  : 1xxxxx10 or 1xxxxx11\n                                num[0]  x      */\n\n    /* len+1 is the length of the above */\n\n    if (num[1] > 10 || num[0] > 11) /* invalid RVLC code */\n        return PV_FAIL;\n\n    if (code&(1 << len))\n        tab2 = RvlcDCTtabInter + 146 + (num[0] << 1) + (code & 1);\n    else\n        tab2 = RvlcDCTtabInter + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);\n\n    PV_BitstreamFlushBits(stream, (int) tab2->len);\n    pTcoef->run = (uint)tab2->run;//(tab->val >> 8) & 255;\n    pTcoef->level = (int)tab2->level;//tab->val & 255;\n    pTcoef->last = (uint)tab2->last;//(tab->val >> 16) & 1;\n\n    pTcoef->sign = BitstreamRead1Bits_INLINE(stream);\n    return PV_SUCCESS;\n}               /* RvlcDecTCOEFInter */\n\nPV_STATUS RvlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef)\n{\n    uint code, mask;\n    const VLCtab2 *tab2;\n    int count, len, num[2] = {0, 0} /*  01/30/01 */;\n\n    mask = 0x4000;      /* mask  100000000000000   */\n    BitstreamShow15Bits(stream, &code);\n\n    len = 1;\n\n    //  09/20/99 Escape mode\n    /// Bitstream Exchange\n    if (code < 2048)\n    {\n        PV_BitstreamFlushBits(stream, 5);\n        pTcoef->last = BitstreamRead1Bits_INLINE(stream);\n        pTcoef->run = BitstreamReadBits16_INLINE(stream, 6);\n        //  09/20/99 New marker bit\n        PV_BitstreamFlushBits(stream, 1);\n        //  09/20/99 The length for LEVEL used to be 7 in the old version\n        pTcoef->level = (int)(BitstreamReadBits16_INLINE(stream, 12) >> 1);\n        //  09/20/99 Another new marker bit\n//      PV_BitstreamFlushBitsCheck(stream, 1);\n        pTcoef->sign = BitstreamReadBits16_INLINE(stream, 5) & 0x1; /* fix   03/13/01 */\n        return PV_SUCCESS;\n    }\n\n    if (code & mask)\n    {\n        count = 1;\n        while (mask && count > 0)                          /* fix  03/28/01 */\n        {\n            mask = mask >> 1;\n            if (code & mask)\n                count--;\n     
       else\n                num[0]++; /* number of zeros in the middle */\n            len++;\n        }\n    }\n    else\n    {\n        count = 2;\n        while (mask && count > 0)              /* fix  03/28/01 */\n        {\n            mask = mask >> 1;\n            if (!(code & mask))\n                count--;\n            else\n                num[count-1]++; /* number of ones in the middle */\n            len++;\n        }\n    }\n\n    code = code & 0x7fff;\n    code = code >> (15 - (len + 1));\n\n    /*  1/30/01, add fast decoding algorithm here */\n    /* code is in two forms : 0xxxx0xxx00 or 0xxx0xxx01\n                         num[1] and num[0] x\n                        or  : 1xxxxx10 or 1xxxxx11\n                                num[0]  x      */\n\n    /* len+1 is the length of the above */\n\n    if (num[1] > 10 || num[0] > 11) /* invalid RVLC code */\n        return PV_FAIL;\n\n    if (code & (1 << len))\n        tab2 = RvlcDCTtabIntra + 146 + (num[0] << 1) + (code & 1);\n    else\n        tab2 = RvlcDCTtabIntra + ptrRvlcTab[num[1]] + (num[0] << 1) + (code & 1);\n\n    PV_BitstreamFlushBits(stream, (int) tab2->len);\n    pTcoef->run = (uint)tab2->run;//(tab->val >> 8) & 255;\n    pTcoef->level = (int)tab2->level;//tab->val & 255;\n    pTcoef->last = (uint)tab2->last;//(tab->val >> 16) & 1;\n\n    pTcoef->sign = BitstreamRead1Bits_INLINE(stream);\n    return PV_SUCCESS;\n}               /* RvlcDecTCOEFIntra */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vlc_decode.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*\n-------------------------------------------------------------------\n                    MPEG-4 Simple Profile Video Decoder\n-------------------------------------------------------------------\n*\n* This software module was originally developed by\n*\n*   Paulo Nunes (IST / ACTS-MoMuSyS)\n*\n* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n* This software module is an implementation of a part of one or more MPEG-4\n* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n* license to this software module or modifications thereof for use in hardware\n* or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* Those intending to use this software module in hardware or software products\n* are advised that its use may infringe existing patents. The original\n* developer of this software module and his/her company, the subsequent\n* editors and their companies, and ISO/IEC have no liability for use of this\n* software module or modifications thereof in an implementation. 
Copyright is\n* not released for non MPEG-4 Video (ISO/IEC 14496-2) Standard conforming\n* products.\n*\n* ACTS-MoMuSys partners retain full right to use the code for his/her own\n* purpose, assign or donate the code to a third party and to inhibit third\n* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) Standard\n* conforming products. This copyright notice must be included in all copies or\n* derivative works.\n*\n* Copyright (c) 1996\n*\n*****************************************************************************/\n\n/***********************************************************HeaderBegin*******\n*\n* File: vlc_dec.h\n*\n* Author:   Paulo Nunes (IST) - Paulo.Nunes@lx.it.pt\n* Created:\n*\n* Description: This is the header file for the \"vlcdec\" module.\n*\n* Notes:\n*\n* Modified: 9-May-96 Paulo Nunes: Reformatted. New headers.\n*\n* ================= PacketVideo Modification ================================\n*\n*       3/30/00  : initial modification to the\n*                new PV-Decoder Lib format.\n*\n***********************************************************CommentEnd********/\n\n\n#ifndef _VLCDECODE_H_\n#define _VLCDECODE_H_\n\n#include \"mp4lib_int.h\"\n\n#define VLC_ERROR_DETECTED(x) ((x) < 0)\n#define VLC_IO_ERROR    -1\n#define VLC_CODE_ERROR  -2\n#define VLC_MB_STUFFING -4\n#define VLC_NO_LAST_BIT -5\n\n#define VLC_ESCAPE_CODE  7167\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif /* __cplusplus */\n\n    PV_STATUS DecodeUserData(BitstreamDecVideo *stream);\n    PV_STATUS PV_GetMBvectors(VideoDecData *, uint mode);\n    PV_STATUS PV_DecodeMBVec(BitstreamDecVideo *stream, MOT *mv_x, MOT *mv_y, int f_code_f);\n    PV_STATUS PV_DeScaleMVD(int f_code, int residual, int vlc_code_mag,  MOT *vector);\n\n    PV_STATUS PV_VlcDecMV(BitstreamDecVideo *stream, int *mv);\n    int PV_VlcDecMCBPC_com_intra(BitstreamDecVideo *stream);\n    int PV_VlcDecMCBPC_com_inter(BitstreamDecVideo *stream);\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    int 
PV_VlcDecMCBPC_com_inter_H263(BitstreamDecVideo *stream);\n    PV_STATUS VlcDecTCOEFShortHeader_AnnexI(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS VlcDecTCOEFShortHeader_AnnexT(BitstreamDecVideo *stream, Tcoef *pTcoef); /* ANNEX_T */\n    PV_STATUS VlcDecTCOEFShortHeader_AnnexIT(BitstreamDecVideo *stream, Tcoef *pTcoef);\n#endif\n    int PV_VlcDecCBPY(BitstreamDecVideo *stream, int intra);\n\n    PV_STATUS VlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS VlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS VlcDecTCOEFShortHeader(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS RvlcDecTCOEFIntra(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS RvlcDecTCOEFInter(BitstreamDecVideo *stream, Tcoef *pTcoef);\n    PV_STATUS PV_VlcDecIntraDCPredSize(BitstreamDecVideo *stream, int compnum, uint *DC_size);\n\n#ifdef __cplusplus\n}\n#endif /* __cplusplus  */\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vlc_dequant.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"vlc_decode.h\"\n#include \"zigzag.h\"\n\n\ntypedef PV_STATUS(*VlcDecFuncP)(BitstreamDecVideo *stream, Tcoef *pTcoef);\nstatic const uint8 AC_rowcol[64] = {    0, 0, 0, 0, 0, 0, 0, 0,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                        0, 1, 1, 1, 1, 1, 1, 1,\n                                   };\nstatic const uint8 mask[8] = /*  for fast bitmap */\n    {128, 64, 32, 16, 8, 4, 2, 1};\n\n\n\n/***********************************************************CommentBegin******\n*\n* -- VlcDequantMpegBlock -- Decodes the DCT coefficients of one 8x8 block and perform\n            dequantization using Mpeg mode.\n    Date:       08/08/2000\n\n    Modified:      3/21/01\n                Added pre IDCT clipping, new ACDC prediction structure, ACDC prediction clipping,\n                16-bit int case, 
removed multiple zigzaging\n******************************************************************************/\n\n#ifdef PV_SUPPORT_MAIN_PROFILE\nint VlcDequantMpegIntraBlock(void *vid, int comp, int switched,\n                             uint8 *bitmapcol, uint8 *bitmaprow)\n{\n    VideoDecData *video = (VideoDecData*) vid;\n    Vol *currVol = video->vol[video->currLayer];\n    BitstreamDecVideo *stream = video->bitstream;\n    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset of all-zero !!!*/\n    int mbnum = video->mbnum;\n    uint CBP = video->headerInfo.CBP[mbnum];\n    int QP = video->QPMB[mbnum];\n    typeDCStore *DC = video->predDC + mbnum;\n    int x_pos = video->mbnum_col;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    uint ACpred_flag = (uint) video->acPredFlag[mbnum];\n\n    /*** VLC *****/\n    int i, j, k;\n    Tcoef run_level;\n    int last, return_status;\n    VlcDecFuncP vlcDecCoeff;\n    int direction;\n    const int *inv_zigzag;\n    /*** Quantizer ****/\n    int dc_scaler;\n    int sum;\n    int *qmat;\n    int32 temp;\n\n    const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};\n    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n    int16 *dcac_row, *dcac_col;\n\n    dcac_row = (*DCAC_row)[B_Xtab[comp]];\n    dcac_col = (*DCAC_col)[B_Ytab[comp]];\n\n\n    i = 1 - switched;\n\n#ifdef FAST_IDCT\n    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n#endif\n\n\n    /* select which Huffman table to be used */\n    vlcDecCoeff = video->vlcDecCoeffIntra;\n\n    dc_scaler = (comp < 4) ? video->mblock->DCScalarLum : video->mblock->DCScalarChr;\n\n    /* enter the zero run decoding loop */\n    sum = 0;\n    qmat = currVol->iqmat;\n\n    /* perform only VLC decoding */\n    /* We cannot do DCACrecon before VLC decoding.  
10/17/2000 */\n    doDCACPrediction(video, comp, datablock, &direction);\n    if (!ACpred_flag) direction = 0;\n    inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);\n    if (CBP & (1 << (5 - comp)))\n    {\n        do\n        {\n            return_status = (*vlcDecCoeff)(stream, &run_level);\n            if (return_status != PV_SUCCESS)\n            {\n                last = 1;/*  11/1/2000 let it slips undetected, just like\n                         in original version */\n                i = VLC_ERROR;\n                ACpred_flag = 0;    /* no of coefficients should not get reset   03/07/2002 */\n                break;\n            }\n\n            i += run_level.run;\n            last = run_level.last;\n            if (i >= 64)\n            {\n                /*  i = NCOEFF_BLOCK; */    /*  11/1/00 */\n                ACpred_flag = 0;    /* no of coefficients should not get reset   03/07/2002 */\n                i = VLC_NO_LAST_BIT;\n                last = 1;\n                break;\n            }\n\n            k = inv_zigzag[i];\n\n            if (run_level.sign == 1)\n            {\n                datablock[k] -= run_level.level;\n            }\n            else\n            {\n                datablock[k] += run_level.level;\n            }\n\n            if (AC_rowcol[k])\n            {\n                temp = (int32)datablock[k] * qmat[k] * QP;\n                temp = (temp + (0x7 & (temp >> 31))) >> 3;\n                if (temp > 2047) temp = 2047;\n                else if (temp < -2048) temp = -2048;\n                datablock[k] = (int) temp;\n\n#ifdef FAST_IDCT\n                bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n                sum ^= temp;\n            }\n\n            i++;\n        }\n        while (!last);\n\n    }\n    else\n    {\n        i = 1;       /*  04/26/01  needed for switched case */\n    }\n    ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF\n    /* dequantize the rest of AC predicted coeff that haven't been 
dequant */\n    if (ACpred_flag)\n    {\n\n        i = NCOEFF_BLOCK; /* otherwise, FAST IDCT won't work correctly,  10/18/2000 */\n\n        if (!direction) /* check vertical */\n        {\n            dcac_row[0]  = datablock[1];\n            dcac_row[1]  = datablock[2];\n            dcac_row[2]  = datablock[3];\n            dcac_row[3]  = datablock[4];\n            dcac_row[4]  = datablock[5];\n            dcac_row[5]  = datablock[6];\n            dcac_row[6]  = datablock[7];\n\n            for (j = 0, k = 8; k < 64; k += 8, j++)\n            {\n                if (dcac_col[j] = datablock[k])\n                {     /* ACDC clipping  03/26/01 */\n                    if (datablock[k] > 2047) dcac_col[j] = 2047;\n                    else if (datablock[k] < -2048) dcac_col[j] = -2048;\n\n                    temp = (int32)dcac_col[j] * qmat[k] * QP;\n                    temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01*/\n                    if (temp > 2047) temp = 2047;\n                    else if (temp < -2048) temp = -2048;\n                    datablock[k] = (int)temp;\n                    sum ^= temp; /*  7/5/01 */\n#ifdef FAST_IDCT\n                    bitmapcol[0] |= mask[k>>3];\n#endif\n\n                }\n            }\n            for (k = 1; k < 8; k++)\n            {\n                if (datablock[k])\n                {\n                    temp = (int32)datablock[k] * qmat[k] * QP;\n                    temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01*/\n                    if (temp > 2047) temp = 2047;\n                    else if (temp < -2048) temp = -2048;\n                    datablock[k] = (int)temp;\n                    sum ^= temp; /*  7/5/01 */\n#ifdef FAST_IDCT\n                    bitmapcol[k] |= 128;\n#endif\n\n                }\n            }\n\n        }\n        else\n        {\n\n            dcac_col[0]  = datablock[8];\n            dcac_col[1]  = datablock[16];\n            dcac_col[2]  = datablock[24];\n            
dcac_col[3]  = datablock[32];\n            dcac_col[4]  = datablock[40];\n            dcac_col[5]  = datablock[48];\n            dcac_col[6]  = datablock[56];\n\n\n            for (j = 0, k = 1; k < 8; k++, j++)\n            {\n                if (dcac_row[j] = datablock[k])\n                {     /* ACDC clipping  03/26/01 */\n                    if (datablock[k] > 2047) dcac_row[j] = 2047;\n                    else if (datablock[k] < -2048) dcac_row[j] = -2048;\n\n                    temp = (int32)dcac_row[j] * qmat[k] * QP;\n                    temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01 */\n                    if (temp > 2047) temp = 2047;\n                    else if (temp < -2048) temp = -2048;\n                    datablock[k] = (int)temp;\n                    sum ^= temp;\n#ifdef FAST_IDCT\n                    bitmapcol[k] |= 128;\n#endif\n\n                }\n            }\n\n            for (k = 8; k < 64; k += 8)\n            {\n                if (datablock[k])\n                {\n                    temp = (int32)datablock[k] * qmat[k] * QP;\n                    temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01 */\n                    if (temp > 2047) temp = 2047;\n                    else if (temp < -2048) temp = -2048;\n                    datablock[k] = (int)temp;\n                    sum ^= temp;\n#ifdef FAST_IDCT\n                    bitmapcol[0] |= mask[k>>3];\n#endif\n                }\n            }\n\n        }\n    }\n    else\n    {\n\n        /* Store the qcoeff-values needed later for prediction */\n\n        dcac_row[0] = datablock[1];                /*  ACDC, no need for clipping */\n        dcac_row[1] = datablock[2];\n        dcac_row[2] = datablock[3];\n        dcac_row[3] = datablock[4];\n        dcac_row[4] = datablock[5];\n        dcac_row[5] = datablock[6];\n        dcac_row[6] = datablock[7];\n\n        dcac_col[0] = datablock[8];\n        dcac_col[1] = datablock[16];\n        dcac_col[2] = datablock[24];\n    
    dcac_col[3] = datablock[32];\n        dcac_col[4] = datablock[40];\n        dcac_col[5] = datablock[48];\n        dcac_col[6] = datablock[56];\n\n        for (k = 1; k < 8; k++)\n        {\n            if (datablock[k])\n            {\n                temp = (int32)datablock[k] * qmat[k] * QP;\n                temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01*/\n                if (temp > 2047) temp = 2047;\n                else if (temp < -2048) temp = -2048;\n                datablock[k] = (int)temp;\n                sum ^= temp; /*  7/5/01 */\n#ifdef FAST_IDCT\n                bitmapcol[k] |= 128;\n#endif\n\n            }\n        }\n        for (k = 8; k < 64; k += 8)\n        {\n            if (datablock[k])\n            {\n                temp = (int32)datablock[k] * qmat[k] * QP;\n                temp = (temp + (0x7 & (temp >> 31))) >> 3;  /*  03/26/01 */\n                if (temp > 2047) temp = 2047;\n                else if (temp < -2048) temp = -2048;\n                datablock[k] = (int)temp;\n                sum ^= temp;\n#ifdef FAST_IDCT\n                bitmapcol[0] |= mask[k>>3];\n#endif\n            }\n        }\n\n    }\n\n\n\n    if (datablock[0])\n    {\n        temp = (int32)datablock[0] * dc_scaler;\n        if (temp > 2047) temp = 2047;            /*  03/14/01 */\n        else if (temp < -2048)  temp = -2048;\n        datablock[0] = (int)temp;\n        sum ^= temp;\n#ifdef FAST_IDCT\n        bitmapcol[0] |= 128;\n#endif\n    }\n\n    if ((sum & 1) == 0)\n    {\n        datablock[63] = datablock[63] ^ 0x1;\n#ifdef FAST_IDCT   /*  7/5/01, need to update bitmap */\n        if (datablock[63])\n            bitmapcol[7] |= 1;\n#endif\n        i = (-64 & i) | NCOEFF_BLOCK;   /*  if i > -1 then i is set to NCOEFF_BLOCK */\n    }\n\n\n#ifdef FAST_IDCT\n    if (i > 10)\n    {\n        for (k = 1; k < 4; k++)\n        {\n            if (bitmapcol[k] != 0)\n            {\n                (*bitmaprow) |= mask[k];\n            }\n        }\n    
}\n#endif\n\n    /* Store the qcoeff-values needed later for prediction */\n    (*DC)[comp] = datablock[0];\n    return i;\n\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- VlcDequantMpegInterBlock -- Decodes the DCT coefficients of one 8x8 block and perform\n            dequantization using Mpeg mode for INTER block.\n    Date:       08/08/2000\n    Modified:              3/21/01\n                clean up, added clipping, 16-bit int case, new ACDC prediction\n******************************************************************************/\n\n\nint VlcDequantMpegInterBlock(void *vid, int comp,\n                             uint8 *bitmapcol, uint8 *bitmaprow)\n{\n    VideoDecData *video = (VideoDecData*) vid;\n    BitstreamDecVideo *stream = video->bitstream;\n    Vol *currVol = video->vol[video->currLayer];\n    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset of all-zero !!!*/\n    int mbnum = video->mbnum;\n    int QP = video->QPMB[mbnum];\n    /*** VLC *****/\n    int i, k;\n    Tcoef run_level;\n    int last, return_status;\n    VlcDecFuncP vlcDecCoeff;\n\n    /*** Quantizer ****/\n    int sum;\n    int *qmat;\n\n    int32 temp;\n\n    i = 0 ;\n\n#ifdef FAST_IDCT\n    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n#endif\n\n    /* select which Huffman table to be used */\n    vlcDecCoeff = video->vlcDecCoeffInter;\n\n    /* enter the zero run decoding loop */\n    sum = 0;\n    qmat = currVol->niqmat;\n    do\n    {\n        return_status = (*vlcDecCoeff)(stream, &run_level);\n        if (return_status != PV_SUCCESS)\n        {\n            last = 1;/*  11/1/2000 let it slips undetected, just like\n                     in original version */\n            i = VLC_ERROR;\n            sum = 1;    /* no of coefficients should not get reset   03/07/2002 */\n            break;\n        }\n\n        i += run_level.run;\n        last = run_level.last;\n  
      if (i >= 64)\n        {\n            /*  i = NCOEFF_BLOCK; */    /*  11/1/00 */\n            //return VLC_NO_LAST_BIT;\n            i = VLC_NO_LAST_BIT;\n            last = 1;\n            sum = 1;    /* no of coefficients should not get reset   03/07/2002 */\n            break;\n        }\n\n        k = zigzag_inv[i];\n\n        if (run_level.sign == 1)\n        {\n            temp = (-(int32)(2 * run_level.level + 1) * qmat[k] * QP + 15) >> 4; /*  03/23/01 */\n            if (temp < -2048) temp = - 2048;\n        }\n        else\n        {\n            temp = ((int32)(2 * run_level.level + 1) * qmat[k] * QP) >> 4; /*  03/23/01 */\n            if (temp > 2047) temp = 2047;\n        }\n\n        datablock[k] = (int)temp;\n\n#ifdef FAST_IDCT\n        bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n        sum ^= temp;\n\n        i++;\n    }\n    while (!last);\n\n    if ((sum & 1) == 0)\n    {\n        datablock[63] = datablock[63] ^ 0x1;\n#ifdef FAST_IDCT   /*  7/5/01, need to update bitmap */\n        if (datablock[63])\n            bitmapcol[7] |= 1;\n#endif\n        i = NCOEFF_BLOCK;\n    }\n\n\n#ifdef FAST_IDCT\n    if (i > 10)\n    {\n        for (k = 1; k < 4; k++)               /*  07/19/01 */\n        {\n            if (bitmapcol[k] != 0)\n            {\n                (*bitmaprow) |= mask[k];\n            }\n        }\n    }\n#endif\n\n    return i;\n}\n#endif\n/***********************************************************CommentBegin******\n*\n* -- VlcDequantIntraH263Block -- Decodes the DCT coefficients of one 8x8 block and perform\n            dequantization in H.263 mode for INTRA block.\n    Date:       08/08/2000\n    Modified:               3/21/01\n                clean up, added clipping, 16-bit int case, removed multiple zigzaging\n******************************************************************************/\n\n\nint VlcDequantH263IntraBlock(VideoDecData *video, int comp, int switched,\n                             uint8 *bitmapcol, uint8 
*bitmaprow)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset of all-zero !!!*/\n    int32 temp;\n    int mbnum = video->mbnum;\n    uint CBP = video->headerInfo.CBP[mbnum];\n    int QP = video->QPMB[mbnum];\n    typeDCStore *DC = video->predDC + mbnum;\n    int x_pos = video->mbnum_col;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    uint ACpred_flag = (uint) video->acPredFlag[mbnum];\n\n    /*** VLC *****/\n    int i, j, k;\n    Tcoef run_level;\n    int last, return_status;\n    VlcDecFuncP vlcDecCoeff;\n    int direction;\n    const int *inv_zigzag;\n\n    /*** Quantizer ****/\n    int dc_scaler;\n    int sgn_coeff;\n\n\n\n    const int B_Xtab[6] = {0, 1, 0, 1, 2, 3};\n    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n    int16 *dcac_row, *dcac_col;\n\n    dcac_row = (*DCAC_row)[B_Xtab[comp]];\n    dcac_col = (*DCAC_col)[B_Ytab[comp]];\n\n#ifdef FAST_IDCT\n    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n#endif\n    /* select which Huffman table to be used */\n    vlcDecCoeff = video->vlcDecCoeffIntra;\n\n    dc_scaler = (comp < 4) ? 
video->mblock->DCScalarLum : video->mblock->DCScalarChr;\n\n    /* perform only VLC decoding */\n    doDCACPrediction(video, comp, datablock, &direction);\n    if (!ACpred_flag) direction = 0;\n\n    inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);  /*  04/17/01 */\n\n    i = 1;\n    if (CBP & (1 << (5 - comp)))\n    {\n        i = 1 - switched;\n        do\n        {\n            return_status = (*vlcDecCoeff)(stream, &run_level);\n            if (return_status != PV_SUCCESS)\n            {\n                last = 1;/* 11/1/2000 let it slips undetected, just like\n                         in original version */\n                i = VLC_ERROR;\n                ACpred_flag = 0;   /* no of coefficients should not get reset   03/07/2002 */\n                break;\n            }\n\n            i += run_level.run;\n            last = run_level.last;\n            if (i >= 64)\n            {\n                ACpred_flag = 0;    /* no of coefficients should not get reset   03/07/2002 */\n                i = VLC_NO_LAST_BIT;\n                last = 1;\n                break;\n            }\n\n            k = inv_zigzag[i];\n\n            if (run_level.sign == 1)\n            {\n                datablock[k] -= run_level.level;\n                sgn_coeff = -1;\n            }\n            else\n            {\n                datablock[k] += run_level.level;\n                sgn_coeff = 1;\n            }\n\n\n            if (AC_rowcol[k])   /*  10/25/2000 */\n            {\n                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                else if (temp < -2048)  temp = -2048;\n                datablock[k] = (int16) temp;\n\n#ifdef FAST_IDCT\n                bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n            }\n\n            i++;\n        }\n        while (!last);\n\n    }\n\n    ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF\n    /* 
dequantize the rest of AC predicted coeff that haven't been dequant */\n    if (ACpred_flag)\n    {\n\n        i = NCOEFF_BLOCK; /* otherwise, FAST IDCT won't work correctly,  10/18/2000 */\n\n        if (!direction) /* check vertical */\n        {\n\n            dcac_row[0]  = datablock[1];\n            dcac_row[1]  = datablock[2];\n            dcac_row[2]  = datablock[3];\n            dcac_row[3]  = datablock[4];\n            dcac_row[4]  = datablock[5];\n            dcac_row[5]  = datablock[6];\n            dcac_row[6]  = datablock[7];\n\n            for (j = 0, k = 8; k < 64; k += 8, j++)\n            {\n                dcac_col[j] = datablock[k];\n                if (dcac_col[j])\n                {\n                    if (datablock[k] > 0)\n                    {\n                        if (datablock[k] > 2047) dcac_col[j] = 2047;\n                        sgn_coeff = 1;\n                    }\n                    else\n                    {\n                        if (datablock[k] < -2048) dcac_col[j] = -2048;\n                        sgn_coeff = -1;\n                    }\n                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                    if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                    else if (temp < -2048)  temp = -2048;\n                    datablock[k] = (int16) temp;\n#ifdef FAST_IDCT\n                    bitmapcol[0] |= mask[k>>3];\n#endif\n\n                }\n            }\n\n            for (k = 1; k < 8; k++)\n            {\n                if (datablock[k])\n                {\n                    sgn_coeff = (datablock[k] > 0) ? 
1 : -1;\n                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                    if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                    else if (temp < -2048)  temp = -2048;\n                    datablock[k] = (int16) temp;\n#ifdef FAST_IDCT\n                    bitmapcol[k] |= 128;\n#endif\n\n                }\n            }\n        }\n        else\n        {\n\n            dcac_col[0]  = datablock[8];\n            dcac_col[1]  = datablock[16];\n            dcac_col[2]  = datablock[24];\n            dcac_col[3]  = datablock[32];\n            dcac_col[4]  = datablock[40];\n            dcac_col[5]  = datablock[48];\n            dcac_col[6]  = datablock[56];\n\n\n            for (j = 0, k = 1; k < 8; k++, j++)\n            {\n                dcac_row[j] = datablock[k];\n                if (dcac_row[j])\n                {\n                    if (datablock[k] > 0)\n                    {\n                        if (datablock[k] > 2047) dcac_row[j] = 2047;\n                        sgn_coeff = 1;\n                    }\n                    else\n                    {\n                        if (datablock[k] < -2048) dcac_row[j] = -2048;\n                        sgn_coeff = -1;\n                    }\n\n                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                    if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                    else if (temp < -2048)  temp = -2048;\n                    datablock[k] = (int) temp;\n#ifdef FAST_IDCT\n                    bitmapcol[k] |= 128;\n#endif\n\n                }\n            }\n            for (k = 8; k < 64; k += 8)\n            {\n                if (datablock[k])\n                {\n                    sgn_coeff = (datablock[k] > 0) ? 
1 : -1;\n                    temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                    if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                    else if (temp < -2048)  temp = -2048;\n                    datablock[k] = (int16) temp;\n#ifdef FAST_IDCT\n                    bitmapcol[0] |= mask[k>>3];\n#endif\n                }\n            }\n\n        }\n    }\n    else\n    {\n        dcac_row[0]  = datablock[1];\n        dcac_row[1]  = datablock[2];\n        dcac_row[2]  = datablock[3];\n        dcac_row[3]  = datablock[4];\n        dcac_row[4]  = datablock[5];\n        dcac_row[5]  = datablock[6];\n        dcac_row[6]  = datablock[7];\n\n        dcac_col[0]  = datablock[8];\n        dcac_col[1]  = datablock[16];\n        dcac_col[2]  = datablock[24];\n        dcac_col[3]  = datablock[32];\n        dcac_col[4]  = datablock[40];\n        dcac_col[5]  = datablock[48];\n        dcac_col[6]  = datablock[56];\n\n        for (k = 1; k < 8; k++)\n        {\n            if (datablock[k])\n            {\n                sgn_coeff = (datablock[k] > 0) ? 1 : -1;\n                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                else if (temp < -2048)  temp = -2048;\n                datablock[k] = (int16) temp;\n#ifdef FAST_IDCT\n                bitmapcol[k] |= 128;\n#endif\n            }\n        }\n        for (k = 8; k < 64; k += 8)\n        {\n            if (datablock[k])\n            {\n                sgn_coeff = (datablock[k] > 0) ? 
1 : -1;\n                temp = (int32)QP * (2 * datablock[k] + sgn_coeff) - sgn_coeff + (QP & 1) * sgn_coeff;\n                if (temp > 2047) temp = 2047;            /*  03/14/01 */\n                else if (temp < -2048)  temp = -2048;\n                datablock[k] = (int16) temp;\n#ifdef FAST_IDCT\n                bitmapcol[0] |= mask[k>>3];\n#endif\n            }\n        }\n    }\n    if (datablock[0])\n    {\n#ifdef FAST_IDCT\n        bitmapcol[0] |= 128;\n#endif\n\n        temp = (int32)datablock[0] * dc_scaler;\n        if (temp > 2047) temp = 2047;            /*  03/14/01 */\n        else if (temp < -2048)  temp = -2048;\n        datablock[0] = (int16)temp;\n    }\n\n\n#ifdef FAST_IDCT\n    if (i > 10)\n    {\n        for (k = 1; k < 4; k++)  /* if i > 10 then k = 0 does not matter  */\n        {\n            if (bitmapcol[k] != 0)\n            {\n                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */\n            }\n        }\n    }\n#endif\n\n    /* Store the qcoeff-values needed later for prediction */\n    (*DC)[comp] = datablock[0];\n    return i;\n}\n\nint VlcDequantH263IntraBlock_SH(VideoDecData *video, int comp, uint8 *bitmapcol, uint8 *bitmaprow)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int16 *datablock = video->mblock->block[comp]; /*, 10/20/2000, assume it has been reset of all-zero !!!*/\n    int32 temp;\n    int mbnum = video->mbnum;\n    uint CBP = video->headerInfo.CBP[mbnum];\n    int16 QP = video->QPMB[mbnum];\n    typeDCStore *DC = video->predDC + mbnum;\n    int x_pos = video->mbnum_col;\n    typeDCACStore *DCAC_row = video->predDCAC_row + x_pos;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    uint ACpred_flag = (uint) video->acPredFlag[mbnum];\n\n    /*** VLC *****/\n    int i, k;\n    Tcoef run_level;\n    int last, return_status;\n    VlcDecFuncP vlcDecCoeff;\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    int direction;\n    const int *inv_zigzag;\n#endif\n    /*** Quantizer ****/\n\n\n\n    const int B_Xtab[6] = 
{0, 1, 0, 1, 2, 3};\n    const int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n    int16 *dcac_row, *dcac_col;\n\n    dcac_row = (*DCAC_row)[B_Xtab[comp]];\n    dcac_col = (*DCAC_col)[B_Ytab[comp]];\n    i = 1;\n\n#ifdef FAST_IDCT\n    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n#endif\n\n    /* select which Huffman table to be used */\n    vlcDecCoeff = video->vlcDecCoeffIntra;\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    if (comp > 3)        /* ANNEX_T */\n    {\n        QP = video->QP_CHR;\n    }\n    if (!video->advanced_INTRA)\n    {\n#endif\n\n        if ((CBP & (1 << (5 - comp))) == 0)\n        {\n#ifdef FAST_IDCT\n            bitmapcol[0] = 128;\n            bitmapcol[1] = bitmapcol[2] = bitmapcol[3] = bitmapcol[4] = bitmapcol[5] = bitmapcol[6] = bitmapcol[7] = 0;\n#endif\n            datablock[0] <<= 3;  /* no need to clip */\n            return 1;//ncoeffs;\n        }\n        else\n        {\n            /* enter the zero run decoding loop */\n            do\n            {\n                return_status = (*vlcDecCoeff)(stream, &run_level);\n                if (return_status != PV_SUCCESS)\n                {\n                    last = 1;/*  11/1/2000 let it slips undetected, just like\n                             in original version */\n                    i = VLC_ERROR;\n                    break;\n                }\n\n                i += run_level.run;\n                last = run_level.last;\n                if (i >= 64)\n                {\n                    /*  i = NCOEFF_BLOCK; */    /*  11/1/00 */\n                    i = VLC_NO_LAST_BIT;\n                    last = 1;\n                    break;\n                }\n                k = zigzag_inv[i];\n\n                if (run_level.sign == 0)\n                {\n                    temp = (int32)QP * (2 * run_level.level + 1) - 1 + (QP & 1);\n                    if (temp > 2047) temp = 2047;\n                }\n                else\n                {\n                    temp 
= -(int32)QP * (2 * run_level.level + 1) + 1 - (QP & 1);\n                    if (temp < -2048) temp = -2048;\n                }\n\n\n                datablock[k] = (int16) temp;\n\n#ifdef FAST_IDCT\n                bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n                i++;\n            }\n            while (!last);\n\n        }\n        /* no ACDC prediction when ACDC disable  */\n        if (datablock[0])\n        {\n#ifdef FAST_IDCT\n            bitmapcol[0] |= 128;\n#endif\n            datablock[0] <<= 3;        /* no need to clip  09/18/2001 */\n        }\n#ifdef PV_ANNEX_IJKT_SUPPORT\n    }\n    else  /* advanced_INTRA mode */\n    {\n        i = 1;\n        doDCACPrediction_I(video, comp, datablock);\n        /* perform only VLC decoding */\n        if (!ACpred_flag)\n        {\n            direction = 0;\n        }\n        else\n        {\n            direction = video->mblock->direction;\n        }\n\n        inv_zigzag = zigzag_inv + (ACpred_flag << 6) + (direction << 6);  /*  04/17/01 */\n\n        if (CBP & (1 << (5 - comp)))\n        {\n            i = 0;\n            do\n            {\n                return_status = (*vlcDecCoeff)(stream, &run_level);\n                if (return_status != PV_SUCCESS)\n                {\n                    last = 1;/*  11/1/2000 let it slips undetected, just like\n                                 in original version */\n                    i = VLC_ERROR;\n                    ACpred_flag = 0;   /* no of coefficients should not get reset   03/07/2002 */\n                    break;\n                }\n\n                i += run_level.run;\n                last = run_level.last;\n                if (i >= 64)\n                {\n                    /*                  i = NCOEFF_BLOCK; */    /*  11/1/00 */\n                    ACpred_flag = 0;    /* no of coefficients should not get reset   03/07/2002 */\n                    i = VLC_NO_LAST_BIT;\n                    last = 1;\n                    break;\n              
  }\n\n                k = inv_zigzag[i];\n\n                if (run_level.sign == 0)\n                {\n                    datablock[k] += (int16)QP * 2 * run_level.level;\n                    if (datablock[k] > 2047) datablock[k] = 2047;\n                }\n                else\n                {\n                    datablock[k] -= (int16)QP * 2 * run_level.level;\n                    if (datablock[k] < -2048) datablock[k] = -2048;\n                }\n#ifdef FAST_IDCT\n                bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n\n                i++;\n            }\n            while (!last);\n\n        }\n        ///// NEED TO DEQUANT THOSE PREDICTED AC COEFF\n        /* dequantize the rest of AC predicted coeff that haven't been dequant */\n\n        if (ACpred_flag)\n        {\n            i = NCOEFF_BLOCK;\n            for (k = 1; k < 8; k++)\n            {\n                if (datablock[k])\n                {\n                    bitmapcol[k] |= 128;\n                }\n\n                if (datablock[k<<3])\n                {\n                    bitmapcol[0] |= mask[k];\n                }\n            }\n        }\n\n        dcac_row[0]  = datablock[1];\n        dcac_row[1]  = datablock[2];\n        dcac_row[2]  = datablock[3];\n        dcac_row[3]  = datablock[4];\n        dcac_row[4]  = datablock[5];\n        dcac_row[5]  = datablock[6];\n        dcac_row[6]  = datablock[7];\n\n        dcac_col[0]  = datablock[8];\n        dcac_col[1]  = datablock[16];\n        dcac_col[2]  = datablock[24];\n        dcac_col[3]  = datablock[32];\n        dcac_col[4]  = datablock[40];\n        dcac_col[5]  = datablock[48];\n        dcac_col[6]  = datablock[56];\n\n        if (datablock[0])\n        {\n#ifdef FAST_IDCT\n            bitmapcol[0] |= 128;\n#endif\n\n            datablock[0] |= 1;\n            if (datablock[0] < 0)\n            {\n                datablock[0] = 0;\n            }\n        }\n    }\n#endif\n\n#ifdef FAST_IDCT\n    if (i > 10)\n    {\n        for 
(k = 1; k < 4; k++)  /* if i > 10 then k = 0 does not matter  */\n        {\n            if (bitmapcol[k] != 0)\n            {\n                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */\n            }\n        }\n    }\n#endif\n\n    /* Store the qcoeff-values needed later for prediction */\n    (*DC)[comp] = datablock[0];\n    return i;\n}\n\n/***********************************************************CommentBegin******\n*\n* -- VlcDequantInterH263Block -- Decodes the DCT coefficients of one 8x8 block and perform\n            dequantization in H.263 mode for INTER block.\n    Date:       08/08/2000\n    Modified:             3/21/01\n                clean up, added clipping, 16-bit int case\n******************************************************************************/\n\n\nint VlcDequantH263InterBlock(VideoDecData *video, int comp,\n                             uint8 *bitmapcol, uint8 *bitmaprow)\n{\n    BitstreamDecVideo *stream = video->bitstream;\n    int16 *datablock = video->mblock->block[comp]; /* 10/20/2000, assume it has been reset of all-zero !!!*/\n    int32 temp;\n    int mbnum = video->mbnum;\n    int QP = video->QPMB[mbnum];\n\n    /*** VLC *****/\n    int i, k;\n    Tcoef run_level;\n    int last, return_status;\n    VlcDecFuncP vlcDecCoeff;\n\n    /*** Quantizer ****/\n\n\n    i = 0;\n\n#ifdef FAST_IDCT\n    *((uint32*)bitmapcol) = *((uint32*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n#endif\n\n    /* select which Huffman table to be used */\n    vlcDecCoeff = video->vlcDecCoeffInter;\n\n    /* enter the zero run decoding loop */\n    do\n    {\n        return_status = (*vlcDecCoeff)(stream, &run_level);\n        if (return_status != PV_SUCCESS)\n        {\n\n\n            last = 1;/*  11/1/2000 let it slips undetected, just like\n                     in original version */\n            i = -1;\n            break;\n        }\n\n        i += run_level.run;\n        last = run_level.last;\n        if (i >= 64)\n        {\n            i = -1;\n     
       last = 1;\n            break;\n        }\n\n        if (run_level.sign == 0)\n        {\n            temp = (int32)QP * (2 * run_level.level + 1) - 1 + (QP & 1);\n            if (temp > 2047) temp = 2047;\n\n        }\n        else\n        {\n            temp = -(int32)QP * (2 * run_level.level + 1) + 1 - (QP & 1);\n            if (temp < -2048) temp = -2048;\n        }\n\n        k = zigzag_inv[i];\n        datablock[k] = (int16)temp;\n#ifdef FAST_IDCT\n        bitmapcol[k&0x7] |= mask[k>>3];\n#endif\n        i++;\n    }\n    while (!last);\n\n#ifdef FAST_IDCT\n    if (i > 10)         /*  07/19/01 */\n    {\n        for (k = 1; k < 4; k++)       /*  if (i > 10 ) k = 0 does not matter */\n        {\n            if (bitmapcol[k] != 0)\n            {\n                (*bitmaprow) |= mask[k]; /* (1<<(7-i)); */\n            }\n        }\n    }\n#endif\n    return i;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vlc_tab.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include    \"mp4dec_api.h\"\n#include    \"mp4def.h\"\n#include    \"mp4lib_int.h\"\n#include    \"vlc_dec_tab.h\"\n#include    \"max_level.h\"\n\n\nconst int intra_max_level[2][NCOEFF_BLOCK] =\n{\n    {27, 10,  5,  4,  3,  3,  3,  3,\n        2,  2,  1,  1,  1,  1,  1,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n    },\n\n    {8,  3,  2,  2,  2,  2,  2,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0\n    }\n};\n\n\nconst int inter_max_level[2][NCOEFF_BLOCK] =\n{\n    {12,  6,  4,  3,  3,  3,  3,  2,\n        2,  2,  2,  1,  1,  1,  1,  1,\n        1,  1,  1,  1,  1,  1,  1,  1,\n        1,  1,  1,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0},\n\n    {3,  
2,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0}\n};\n\n\nconst int intra_max_run0[28] = { 999, 14,  9,  7,  3,  2,  1,\n                                 1,  1,  1,  1,  0,  0,  0,\n                                 0,  0,  0,  0,  0,  0,  0,\n                                 0,  0,  0,  0,  0,  0,  0\n                               };\n\n\nconst int intra_max_run1[9] = { 999, 20,  6,\n                                1,  0,  0,\n                                0,  0,  0\n                              };\n\nconst int inter_max_run0[13] = { 999,\n                                 26, 10,  6,  2,  1,  1,\n                                 0,  0,  0,  0,  0,  0\n                               };\n\n\nconst int inter_max_run1[4] = { 999, 40,  1,  0 };\n\nconst VLCshorttab PV_TMNMVtab0[] =\n{\n    {3, 4}, { -3, 4}, {2, 3}, {2, 3}, { -2, 3}, { -2, 3}, {1, 2}, {1, 2}, {1, 2}, {1, 2},\n    { -1, 2}, { -1, 2}, { -1, 2}, { -1, 2}\n};\n\nconst VLCshorttab PV_TMNMVtab1[] =\n{\n    {12, 10}, { -12, 10}, {11, 10}, { -11, 10}, {10, 9}, {10, 9}, { -10, 9}, { -10, 9},\n    {9, 9}, {9, 9}, { -9, 9}, { -9, 9}, {8, 9}, {8, 9}, { -8, 9}, { -8, 9}, {7, 7}, {7, 7},\n    {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, {7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7},\n    { -7, 7}, { -7, 7}, { -7, 7}, { -7, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7}, {6, 7},\n    {6, 7}, {6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7}, { -6, 7},\n    { -6, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, {5, 7}, { -5, 7},\n    { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, { -5, 7}, {4, 6}, {4, 6}, {4, 6},\n    {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6}, {4, 6},\n    {4, 6}, {4, 6}, { -4, 6}, { -4, 6}, { -4, 
6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6},\n    { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}, { -4, 6}\n};\n\nconst VLCshorttab PV_TMNMVtab2[] =\n{\n    {32, 12}, { -32, 12}, {31, 12}, { -31, 12}, {30, 11}, {30, 11}, { -30, 11}, { -30, 11},\n    {29, 11}, {29, 11}, { -29, 11}, { -29, 11}, {28, 11}, {28, 11}, { -28, 11}, { -28, 11},\n    {27, 11}, {27, 11}, { -27, 11}, { -27, 11}, {26, 11}, {26, 11}, { -26, 11}, { -26, 11},\n    {25, 11}, {25, 11}, { -25, 11}, { -25, 11}, {24, 10}, {24, 10}, {24, 10}, {24, 10},\n    { -24, 10}, { -24, 10}, { -24, 10}, { -24, 10}, {23, 10}, {23, 10}, {23, 10}, {23, 10},\n    { -23, 10}, { -23, 10}, { -23, 10}, { -23, 10}, {22, 10}, {22, 10}, {22, 10}, {22, 10},\n    { -22, 10}, { -22, 10}, { -22, 10}, { -22, 10}, {21, 10}, {21, 10}, {21, 10}, {21, 10},\n    { -21, 10}, { -21, 10}, { -21, 10}, { -21, 10}, {20, 10}, {20, 10}, {20, 10}, {20, 10},\n    { -20, 10}, { -20, 10}, { -20, 10}, { -20, 10}, {19, 10}, {19, 10}, {19, 10}, {19, 10},\n    { -19, 10}, { -19, 10}, { -19, 10}, { -19, 10}, {18, 10}, {18, 10}, {18, 10}, {18, 10},\n    { -18, 10}, { -18, 10}, { -18, 10}, { -18, 10}, {17, 10}, {17, 10}, {17, 10}, {17, 10},\n    { -17, 10}, { -17, 10}, { -17, 10}, { -17, 10}, {16, 10}, {16, 10}, {16, 10}, {16, 10},\n    { -16, 10}, { -16, 10}, { -16, 10}, { -16, 10}, {15, 10}, {15, 10}, {15, 10}, {15, 10},\n    { -15, 10}, { -15, 10}, { -15, 10}, { -15, 10}, {14, 10}, {14, 10}, {14, 10}, {14, 10},\n    { -14, 10}, { -14, 10}, { -14, 10}, { -14, 10}, {13, 10}, {13, 10}, {13, 10}, {13, 10},\n    { -13, 10}, { -13, 10}, { -13, 10}, { -13, 10}\n};\n\nconst VLCshorttab PV_MCBPCtab[] =\n{\n    {VLC_ERROR, 0},\n    {255, 9}, {52, 9}, {36, 9}, {20, 9}, {49, 9}, {35, 8}, {35, 8}, {19, 8}, {19, 8},\n    {50, 8}, {50, 8}, {51, 7}, {51, 7}, {51, 7}, {51, 7}, {34, 7}, {34, 7}, {34, 7},\n    {34, 7}, {18, 7}, {18, 7}, {18, 7}, {18, 7}, {33, 7}, {33, 7}, {33, 7}, {33, 7},\n    {17, 7}, {17, 7}, {17, 7}, {17, 7}, {4, 6}, 
{4, 6}, {4, 6}, {4, 6}, {4, 6},\n    {4, 6}, {4, 6}, {4, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6}, {48, 6},\n    {48, 6}, {48, 6}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5},\n    {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5}, {3, 5},\n    {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4},\n    {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4},\n    {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4},\n    {32, 4}, {32, 4}, {32, 4}, {32, 4}, {32, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4},\n    {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4},\n    {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4},\n    {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4}, {16, 4},\n    {16, 4}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3}, {2, 3},\n    {2, 3}, {2, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3}, {1, 3},\n    {1, 3}, {1, 3}, {1, 3}\n};\n\n#ifdef 
PV_ANNEX_IJKT_SUPPORT\nconst VLCshorttab PV_MCBPCtab1[] =\n{\n    {5, 11}, {5, 11},  {5, 11}, {5, 11}, {21, 13}, {21, 13}, {37, 13}, {53, 13},\n};\n#endif\nconst VLCshorttab PV_MCBPCtabintra[] =\n{\n    {VLC_ERROR, 0},\n    {20, 6}, {36, 6}, {52, 6}, {4, 4}, {4, 4}, {4, 4},\n    {4, 4}, {19, 3}, {19, 3}, {19, 3}, {19, 3}, {19, 3},\n    {19, 3}, {19, 3}, {19, 3}, {35, 3}, {35, 3}, {35, 3},\n    {35, 3}, {35, 3}, {35, 3}, {35, 3}, {35, 3}, {51, 3},\n    {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3}, {51, 3},\n    {51, 3}\n};\n\n\n\nconst VLCshorttab PV_CBPYtab[48] =\n{\n    {VLC_ERROR, 0}, {VLC_ERROR, 0}, {6, 6}, {9, 6}, {8, 5}, {8, 5}, {4, 5}, {4, 5},\n    {2, 5}, {2, 5}, {1, 5}, {1, 5}, {0, 4}, {0, 4}, {0, 4}, {0, 4},\n    {12, 4}, {12, 4}, {12, 4}, {12, 4}, {10, 4}, {10, 4}, {10, 4}, {10, 4},\n    {14, 4}, {14, 4}, {14, 4}, {14, 4}, {5, 4}, {5, 4}, {5, 4}, {5, 4},\n    {13, 4}, {13, 4}, {13, 4}, {13, 4}, {3, 4}, {3, 4}, {3, 4}, {3, 4},\n    {11, 4}, {11, 4}, {11, 4}, {11, 4}, {7, 4}, {7, 4}, {7, 4}, {7, 4}\n};\n\n\n\nconst VLCtab2 PV_DCT3Dtab0[] =\n{\n    {0x8, 1, 1, 7}, {0x7, 1, 1, 7}, {0x6, 1, 1, 7}, {0x5, 1, 1, 7}, {0xc, 1, 0, 7}, {0xb, 1, 0, 7},\n    {0xa, 1, 0, 7}, {0x0, 4, 0, 7}, {0x4, 1, 1, 6}, {0x4, 1, 1, 6}, {0x3, 1, 1, 6}, {0x3, 1, 1, 6},\n    {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6}, {0x9, 1, 0, 6}, {0x9, 1, 0, 6},\n    {0x8, 1, 0, 6}, {0x8, 1, 0, 6}, {0x7, 1, 0, 6}, {0x7, 1, 0, 6}, {0x6, 1, 0, 6}, {0x6, 1, 0, 6},\n    {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x0, 3, 0, 6}, {0x0, 3, 0, 6}, {0x5, 1, 0, 5}, {0x5, 1, 0, 5},\n    {0x5, 1, 0, 5}, {0x5, 1, 0, 5}, {0x4, 1, 0, 5}, {0x4, 1, 0, 5}, {0x4, 1, 0, 5}, {0x4, 1, 0, 5},\n    {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x3, 1, 0, 5}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, 
{0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3},\n    {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3},\n    {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3}, {0x1, 1, 0, 3},\n    {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x2, 1, 0, 4},\n    {0x2, 1, 0, 4}, {0x2, 1, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4},\n    {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}, {0x0, 2, 0, 4}\n};\n\n\nconst VLCtab2 PV_DCT3Dtab1[] =\n{\n    {0x0, 9, 0, 10}, {0x0, 8, 0, 10}, {0x18, 1, 1, 9}, {0x18, 1, 1, 9}, {0x17, 1, 1, 9}, {0x17, 1, 1, 9},\n    {0x16, 1, 1, 9}, {0x16, 1, 1, 9}, {0x15, 1, 1, 9}, {0x15, 1, 1, 9}, {0x14, 1, 1, 9}, {0x14, 1, 1, 9},\n    {0x13, 1, 1, 9}, {0x13, 1, 1, 9}, {0x12, 1, 1, 9}, {0x12, 1, 1, 9}, {0x11, 1, 1, 9}, {0x11, 1, 1, 9},\n    {0x0, 2, 1, 9}, {0x0, 2, 1, 9}, {0x16, 1, 0, 9}, {0x16, 1, 0, 9}, {0x15, 1, 0, 9}, {0x15, 1, 0, 9},\n    {0x14, 1, 0, 9}, {0x14, 1, 0, 9}, {0x13, 1, 0, 9}, {0x13, 1, 0, 9}, {0x12, 1, 0, 9}, {0x12, 1, 0, 9},\n    {0x11, 1, 0, 9}, {0x11, 1, 0, 9}, {0x10, 1, 0, 9}, {0x10, 1, 0, 9}, {0xf, 1, 0, 9}, {0xf, 1, 0, 9},\n    {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x3, 2, 0, 9}, {0x3, 2, 0, 9}, {0x0, 7, 0, 9}, {0x0, 7, 0, 9},\n    {0x0, 6, 0, 9}, {0x0, 6, 0, 9}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8}, {0x10, 1, 1, 8},\n    {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xf, 1, 1, 8}, {0xe, 1, 1, 8}, {0xe, 1, 1, 8},\n    {0xe, 1, 1, 8}, {0xe, 1, 1, 8}, {0xd, 1, 1, 8}, 
{0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8},\n    {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8},\n    {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8},\n    {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0xe, 1, 0, 8}, {0xe, 1, 0, 8},\n    {0xe, 1, 0, 8}, {0xe, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8}, {0xd, 1, 0, 8},\n    {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x2, 2, 0, 8}, {0x1, 3, 0, 8}, {0x1, 3, 0, 8},\n    {0x1, 3, 0, 8}, {0x1, 3, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8}, {0x0, 5, 0, 8}\n};\n\n\nconst VLCtab2 PV_DCT3Dtab2[] =\n{\n    {0x1, 2, 1, 11}, {0x1, 2, 1, 11}, {0x0, 3, 1, 11}, {0x0, 3, 1, 11}, {0x0, 0xb, 0, 11}, {0x0, 0xb, 0, 11},\n    {0x0, 0xa, 0, 11}, {0x0, 0xa, 0, 11}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10}, {0x1c, 1, 1, 10},\n    {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1b, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x1a, 1, 1, 10},\n    {0x1a, 1, 1, 10}, {0x1a, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10}, {0x19, 1, 1, 10},\n    {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x9, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10},\n    {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10}, {0x7, 2, 0, 10},\n    {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x6, 2, 0, 10}, {0x5, 2, 0, 10}, {0x5, 2, 0, 10},\n    {0x5, 2, 0, 10}, {0x5, 2, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10}, {0x3, 3, 0, 10},\n    {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x2, 3, 0, 10}, {0x1, 4, 0, 10}, {0x1, 4, 0, 10},\n    {0x1, 4, 0, 10}, {0x1, 4, 0, 10}, {0x0, 0xc, 0, 11}, {0x0, 0xc, 0, 11}, {0x1, 5, 0, 11}, {0x1, 5, 0, 11},\n    {0x17, 1, 0, 11}, {0x17, 1, 0, 11}, {0x18, 1, 0, 11}, {0x18, 1, 0, 11}, {0x1d, 1, 1, 11}, {0x1d, 1, 1, 11},\n    {0x1e, 1, 1, 11}, {0x1e, 1, 1, 11}, {0x1f, 1, 1, 11}, {0x1f, 1, 1, 11}, {0x20, 1, 1, 
11}, {0x20, 1, 1, 11},\n    {0x1, 6, 0, 12}, {0x2, 4, 0, 12}, {0x4, 3, 0, 12}, {0x5, 3, 0, 12}, {0x6, 3, 0, 12}, {0xa, 2, 0, 12},\n    {0x19, 1, 0, 12}, {0x1a, 1, 0, 12}, {0x21, 1, 1, 12}, {0x22, 1, 1, 12}, {0x23, 1, 1, 12}, {0x24, 1, 1, 12},\n    {0x25, 1, 1, 12}, {0x26, 1, 1, 12}, {0x27, 1, 1, 12}, {0x28, 1, 1, 12}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7}, {0xbf, 0xf, 1, 7},\n    {0xbf, 0xf, 1, 7}\n};\n\n/* New tables for Intra luminance blocks */\n\nconst VLCtab2 PV_DCT3Dtab3[] =\n{\n    {0x4, 1, 1, 7}, {0x3, 1, 1, 7}, {0x6, 1, 0, 7}, {0x5, 1, 1, 7},\n    {0x7, 1, 0, 7}, {0x2, 2, 0, 7}, {0x1, 3, 0, 7}, {0x0, 9, 0, 7},\n    {0x0, 2, 1, 6}, {0x0, 2, 1, 6}, {0x5, 1, 0, 6}, {0x5, 1, 0, 6},\n    {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6},\n    {0x4, 1, 0, 6}, {0x4, 1, 0, 6}, {0x3, 1, 0, 6}, {0x3, 1, 0, 6},\n    {0x0, 8, 0, 6}, {0x0, 8, 0, 6}, {0x0, 7, 0, 6}, {0x0, 7, 0, 6},\n    {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x0, 6, 0, 6}, {0x0, 6, 0, 6},\n    {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5},\n    {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5},\n    {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5},\n    {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, 
{0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4},\n    {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4},\n    {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4},\n    {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}\n};\n\nconst VLCtab2 PV_DCT3Dtab4[] =\n{\n    {0x0, 0x12, 0, 10}, {0x0, 0x11, 0, 10}, {0xe, 1, 1, 9}, {0xe, 1, 1, 9},\n    {0xd, 1, 1, 9}, {0xd, 1, 1, 9}, {0xc, 1, 1, 9}, {0xc, 1, 1, 9},\n    {0xb, 1, 1, 9}, {0xb, 1, 1, 9}, {0xa, 1, 1, 9}, {0xa, 1, 1, 9},\n    {0x1, 2, 1, 9}, {0x1, 2, 1, 9}, {0x0, 4, 1, 9}, {0x0, 4, 1, 9},\n    {0xc, 1, 0, 9}, {0xc, 1, 0, 9}, {0xb, 1, 0, 9}, {0xb, 1, 0, 9},\n    {0x7, 2, 0, 9}, {0x7, 2, 0, 9}, {0x6, 2, 0, 9}, {0x6, 2, 0, 9},\n    {0x5, 2, 0, 9}, {0x5, 2, 0, 9}, {0x3, 3, 0, 9}, {0x3, 3, 0, 9},\n    {0x2, 3, 0, 9}, {0x2, 3, 0, 9}, {0x1, 6, 0, 9}, {0x1, 6, 0, 9},\n    {0x1, 5, 0, 9}, {0x1, 5, 0, 9}, {0x0, 0x10, 0, 9}, {0x0, 0x10, 0, 9},\n    {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x0, 0xf, 0, 9}, {0x0, 0xf, 0, 9},\n    {0x0, 0xe, 0, 9}, {0x0, 0xe, 0, 9}, {0x0, 0xd, 0, 9}, {0x0, 0xd, 0, 9},\n    {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8},\n    {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8},\n    {0x6, 1, 1, 8}, {0x6, 1, 1, 8}, {0x6, 1, 1, 8}, 
{0x6, 1, 1, 8},\n    {0x0, 3, 1, 8}, {0x0, 3, 1, 8}, {0x0, 3, 1, 8}, {0x0, 3, 1, 8},\n    {0xa, 1, 0, 8}, {0xa, 1, 0, 8}, {0xa, 1, 0, 8}, {0xa, 1, 0, 8},\n    {0x9, 1, 0, 8}, {0x9, 1, 0, 8}, {0x9, 1, 0, 8}, {0x9, 1, 0, 8},\n    {0x8, 1, 0, 8}, {0x8, 1, 0, 8}, {0x8, 1, 0, 8}, {0x8, 1, 0, 8},\n    {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8},\n    {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8},\n    {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8},\n    {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8}, {0x0, 0xc, 0, 8},\n    {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8}, {0x0, 0xb, 0, 8},\n    {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8}, {0x0, 0xa, 0, 8}\n};\n\nconst VLCtab2 PV_DCT3Dtab5[] =\n{\n    {0x0, 7, 1, 11}, {0x0, 7, 1, 11}, {0x0, 6, 1, 11}, {0x0, 6, 1, 11},\n    {0x0, 0x16, 0, 11}, {0x0, 0x16, 0, 11}, {0x0, 0x15, 0, 11}, {0x0, 0x15, 0, 11},\n    {0x2, 2, 1, 10}, {0x2, 2, 1, 10}, {0x2, 2, 1, 10}, {0x2, 2, 1, 10},\n    {0x1, 3, 1, 10}, {0x1, 3, 1, 10}, {0x1, 3, 1, 10}, {0x1, 3, 1, 10},\n    {0x0, 5, 1, 10}, {0x0, 5, 1, 10}, {0x0, 5, 1, 10}, {0x0, 5, 1, 10},\n    {0xd, 1, 0, 10}, {0xd, 1, 0, 10}, {0xd, 1, 0, 10}, {0xd, 1, 0, 10},\n    {0x5, 3, 0, 10}, {0x5, 3, 0, 10}, {0x5, 3, 0, 10}, {0x5, 3, 0, 10},\n    {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10}, {0x8, 2, 0, 10},\n    {0x4, 3, 0, 10}, {0x4, 3, 0, 10}, {0x4, 3, 0, 10}, {0x4, 3, 0, 10},\n    {0x3, 4, 0, 10}, {0x3, 4, 0, 10}, {0x3, 4, 0, 10}, {0x3, 4, 0, 10},\n    {0x2, 4, 0, 10}, {0x2, 4, 0, 10}, {0x2, 4, 0, 10}, {0x2, 4, 0, 10},\n    {0x1, 7, 0, 10}, {0x1, 7, 0, 10}, {0x1, 7, 0, 10}, {0x1, 7, 0, 10},\n    {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10}, {0x0, 0x14, 0, 10},\n    {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10}, {0x0, 0x13, 0, 10},\n    {0x0, 0x17, 0, 11}, {0x0, 0x17, 0, 11}, {0x0, 0x18, 0, 11}, {0x0, 0x18, 0, 11},\n    {0x1, 8, 0, 11}, {0x1, 8, 0, 11}, {0x9, 2, 0, 11}, {0x9, 2, 0, 11},\n    {0x3, 2, 
1, 11}, {0x3, 2, 1, 11}, {0x4, 2, 1, 11}, {0x4, 2, 1, 11},\n    {0xf, 1, 1, 11}, {0xf, 1, 1, 11}, {0x10, 1, 1, 11}, {0x10, 1, 1, 11},\n    {0, 0x19, 0, 12}, {0, 0x1a, 0, 12}, {0, 0x1b, 0, 12}, {1, 9, 0, 12},\n    {0x6, 3, 0, 12}, {0x1, 0xa, 0, 12}, {0x2, 5, 0, 12}, {0x7, 3, 0, 12},\n    {0xe, 1, 0, 12}, {0x0, 8, 1, 12}, {0x5, 2, 1, 12}, {0x6, 2, 1, 12},\n    {0x11, 1, 1, 12}, {0x12, 1, 1, 12}, {0x13, 1, 1, 12}, {0x14, 1, 1, 12},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7},\n    {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}, {0x1b, 0xff, 0, 7}\n};\n\n#ifdef PV_ANNEX_IJKT_SUPPORT\nconst VLCtab2 PV_DCT3Dtab6[] =\n{\n    {0x0, 3, 1, 7}, {0x4, 1, 1, 7}, {0x6, 1, 1, 7}, {0x5, 1, 1, 7}, {0x1, 3, 0, 7}, {0x2, 2, 0, 7},\n    {0x0, 9, 0, 7}, {0x5, 1, 0, 7}, {0x0, 2, 1, 6}, {0x0, 2, 1, 6}, {0x3, 1, 1, 6}, {0x3, 1, 1, 6},\n    {0x2, 1, 1, 6}, {0x2, 1, 1, 6}, {0x1, 1, 1, 6}, {0x1, 1, 1, 6}, {0x0, 6, 0, 6}, {0x0, 6, 0, 6},\n    {0x0, 7, 0, 6}, {0x0, 7, 0, 6}, {0x0, 8, 0, 6}, {0x0, 8, 0, 6}, {0x4, 1, 0, 6}, {0x4, 1, 0, 6},\n    {0x1, 2, 0, 6}, {0x1, 2, 0, 6}, {0x3, 1, 0, 6}, {0x3, 1, 0, 6}, {0x2, 1, 0, 5}, {0x2, 1, 0, 5},\n    {0x2, 1, 0, 5}, {0x2, 1, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5}, {0x0, 4, 0, 5},\n    {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 5, 0, 5}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4}, {0x0, 1, 1, 4},\n    
{0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 1, 0, 2},\n    {0x0, 1, 0, 2}, {0x0, 1, 0, 2}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3}, {0x0, 2, 0, 3},\n    {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x0, 3, 0, 4},\n    {0x0, 3, 0, 4}, {0x0, 3, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4},\n    {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}, {0x1, 1, 0, 4}\n};\n\nconst VLCtab2 PV_DCT3Dtab7[] =\n{\n    {0xb, 1, 0, 10}, {0xa, 1, 0, 10}, {0x0, 5, 1, 9}, {0x0, 5, 1, 9}, {0x0, 6, 1, 9}, {0x0, 6, 1, 9},\n    {0x1, 2, 1, 9}, {0x1, 2, 1, 9}, {0x2, 2, 1, 9}, {0x2, 2, 1, 9}, {0xf, 1, 1, 9}, {0xf, 1, 1, 9},\n    {0x10, 1, 1, 9}, {0x10, 1, 1, 9}, {0x12, 1, 1, 9}, {0x12, 1, 1, 9}, {0x11, 1, 1, 9}, {0x11, 1, 1, 9},\n    {0xe, 1, 1, 9}, {0xe, 1, 1, 9}, {0x0, 13, 0, 9}, {0x0, 13, 0, 9}, {0x0, 14, 0, 9}, {0x0, 14, 0, 9},\n    {0x0, 15, 0, 9}, {0x0, 15, 0, 9}, {0x0, 16, 0, 9}, {0x0, 16, 0, 9}, {0x0, 17, 0, 9}, {0x0, 17, 0, 9},\n    {0x0, 18, 0, 9}, {0x0, 18, 0, 9}, {0x0, 11, 0, 9}, {0x0, 11, 0, 9}, {0x0, 12, 0, 9}, {0x0, 12, 0, 9},\n    {0x5, 2, 0, 9}, {0x5, 2, 0, 9}, {0x4, 2, 0, 9}, {0x4, 2, 0, 9}, {0x9, 1, 0, 9}, {0x9, 1, 0, 9},\n    {0x8, 1, 0, 9}, {0x8, 1, 0, 9}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8}, {0x0, 4, 1, 8},\n    {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, {0x7, 1, 1, 8}, 
{0x7, 1, 1, 8}, {0x8, 1, 1, 8}, {0x8, 1, 1, 8},\n    {0x8, 1, 1, 8}, {0x8, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8}, {0xd, 1, 1, 8},\n    {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xc, 1, 1, 8}, {0xb, 1, 1, 8}, {0xb, 1, 1, 8},\n    {0xb, 1, 1, 8}, {0xb, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8}, {0xa, 1, 1, 8},\n    {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x9, 1, 1, 8}, {0x0, 10, 0, 8}, {0x0, 10, 0, 8},\n    {0x0, 10, 0, 8}, {0x0, 10, 0, 8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8}, {0x6, 1, 0, 8},\n    {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x3, 2, 0, 8}, {0x1, 4, 0, 8}, {0x1, 4, 0, 8},\n    {0x1, 4, 0, 8}, {0x1, 4, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8}, {0x7, 1, 0, 8}\n};\n\n\nconst VLCtab2 PV_DCT3Dtab8[] =\n{\n    {0x13, 0x1, 1, 11}, {0x13, 0x1, 1, 11}, {0x14, 0x1, 1, 11}, {0x14, 0x1, 1, 11}, {0x9, 0x2, 0, 11}, {0x9, 0x2, 0, 11},\n    {0x4, 0x3, 0, 11}, {0x4, 0x3, 0, 11}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10}, {0x0, 0x7, 1, 10},\n    {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x1, 0x3, 1, 10}, {0x3, 0x2, 1, 10}, {0x3, 0x2, 1, 10},\n    {0x3, 0x2, 1, 10}, {0x3, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10}, {0x4, 0x2, 1, 10},\n    {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0xc, 0x1, 0, 10}, {0x2, 0x4, 0, 10}, {0x2, 0x4, 0, 10},\n    {0x2, 0x4, 0, 10}, {0x2, 0x4, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10}, {0x8, 0x2, 0, 10},\n    {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x7, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x6, 0x2, 0, 10},\n    {0x6, 0x2, 0, 10}, {0x6, 0x2, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10}, {0x3, 0x3, 0, 10},\n    {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x2, 0x3, 0, 10}, {0x1, 0x5, 0, 10}, {0x1, 0x5, 0, 10},\n    {0x1, 0x5, 0, 10}, {0x1, 0x5, 0, 10}, {0xd, 0x1, 0, 11}, {0xd, 0x1, 0, 11}, {0x1, 0x6, 0, 11}, {0x1, 0x6, 0, 11},\n    
{0x0, 0x14, 0, 11}, {0x0, 0x14, 0, 11}, {0x0, 0x13, 0, 11}, {0x0, 0x13, 0, 11}, {0x2, 0x3, 1, 11}, {0x2, 0x3, 1, 11},\n    {0x1, 0x4, 1, 11}, {0x1, 0x4, 1, 11}, {0x0, 0x9, 1, 11}, {0x0, 0x9, 1, 11}, {0x0, 0x8, 1, 11}, {0x0, 0x8, 1, 11},\n    {0x1, 0x7, 0, 12}, {0x3, 0x4, 0, 12}, {0x5, 0x3, 0, 12}, {0x0, 0x19, 0, 12}, {0x0, 0x18, 0, 12}, {0x0, 0x17, 0, 12},\n    {0x0, 0x16, 0, 12}, {0x0, 0x15, 0, 12}, {0x15, 0x1, 1, 12}, {0x16, 0x1, 1, 12}, {0x17, 0x1, 1, 12}, {0x7, 0x2, 1, 12},\n    {0x6, 0x2, 1, 12}, {0x5, 0x2, 1, 12}, {0x3, 0x3, 1, 12}, {0x0, 0xa, 1, 12}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7},\n    {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7},\n    {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7},\n    {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7},\n    {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7},\n    {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}, {0x2f, 0x3f, 1, 7}\n};\n#endif\n/* RVLC tables */\nconst int ptrRvlcTab[11] = {0, 24, 46, 66, 84, 100, 114, 126, 134, 140, 144};\n\nconst VLCtab2 RvlcDCTtabIntra[170] = /* 00xxxx00 or 00xxxx01 */\n{\n    {27, 255, 0, 5},    /* 0000 is escape code */\n    {1, 1, 0, 4},\n    {2, 1, 0, 5},\n    {3, 1, 0, 5},\n    {4, 1, 0, 6},\n    {5, 1, 0, 6},\n    {6, 1, 0, 7},\n    {7, 1, 0, 7},\n    {8, 1, 0, 8},\n    {9, 1, 0, 8},\n    {10, 1, 0, 9},\n    {5, 2, 0, 9},\n    {11, 1, 0, 10},\n    {12, 1, 0, 10},\n    {13, 1, 0, 11},\n    {9, 2, 0, 11},\n    {10, 2, 0, 12},\n    {4, 4, 0, 12},\n    {14, 1, 0, 13},\n    {15, 1, 0, 13},\n    {16, 1, 0, 14},\n    {17, 1, 0, 14},\n    {0, 27, 0, 15},\n    {3, 9, 0, 15},\n    /* 010xxxx00 or 010xxxx01 */\n    {1, 2, 0, 5},\n    {0, 4, 0, 5},\n    {0, 
5, 0, 6},\n    {0, 6, 0, 6},\n    {2, 2, 0, 7},\n    {1, 3, 0, 7},\n    {3, 2, 0, 8},\n    {4, 2, 0, 8},\n    {2, 3, 0, 9},\n    {3, 3, 0, 9},\n    {6, 2, 0, 10},\n    {7, 2, 0, 10},\n    {5, 3, 0, 11},\n    {6, 3, 0, 11},\n    {5, 4, 0, 12},\n    {6, 4, 0, 12},\n    {11, 2, 0, 13},\n    {8, 3, 0, 13},\n    {18, 1, 0, 14},\n    {8, 4, 0, 14},\n    {6, 5, 0, 15},\n    {7, 5, 0, 15},\n    /* 0110xxxx00 or 0110xxxx01 */\n    {3, 1, 1, 6},\n    {4, 1, 1, 6},\n    {0, 7, 0, 7},\n    {7, 1, 1, 7},\n    {1, 4, 0, 8},\n    {1, 5, 0, 8},\n    {1, 6, 0, 9},\n    {0, 10, 0, 9},\n    {8, 2, 0, 10},\n    {4, 3, 0, 10},\n    {7, 3, 0, 11},\n    {3, 4, 0, 11},\n    {3, 5, 0, 12},\n    {4, 5, 0, 12},\n    {9, 3, 0, 13},\n    {7, 4, 0, 13},\n    {5, 5, 0, 14},\n    {4, 6, 0, 14},\n    {9, 4, 0, 15},\n    {12, 2, 0, 15},\n    /* 01110xxxx00 or 01110xxxx01 */\n    {8, 1, 1, 7},\n    {9, 1, 1, 7},\n    {0, 8, 0, 8},\n    {0, 9, 0, 8},\n    {0, 11, 0, 9},\n    {1, 2, 1, 9},\n    {2, 4, 0, 10},\n    {1, 7, 0, 10},\n    {2, 5, 0, 11},\n    {2, 6, 0, 11},\n    {1, 10, 0, 12},\n    {0, 18, 0, 12},\n    {3, 6, 0, 13},\n    {2, 7, 0, 13},\n    {5, 6, 0, 14},\n    {3, 7, 0, 14},\n    {19, 1, 0, 15},\n    {1, 5, 1, 15},\n    /* 011110xxxx00 or 011110xxxx01 */\n    {0, 2, 1, 8},\n    {12, 1, 1, 8},\n    {15, 1, 1, 9},\n    {16, 1, 1, 9},\n    {0, 12, 0, 10},\n    {0, 13, 0, 10},\n    {1, 8, 0, 11},\n    {1, 9, 0, 11},\n    {0, 19, 0, 12},\n    {0, 22, 0, 12},\n    {2, 8, 0, 13},\n    {2, 9, 0, 13},\n    {3, 8, 0, 14},\n    {2, 10, 0, 14},\n    {2, 3, 1, 15},\n    {13, 2, 1, 15},\n    /* 0111110xxxx00 or 0111110xxxx01 */\n    {17, 1, 1, 9},\n    {18, 1, 1, 9},\n    {0, 14, 0, 10},\n    {21, 1, 1, 10},\n    {0, 15, 0, 11},\n    {0, 16, 0, 11},\n    {1, 3, 1, 12},\n    {3, 2, 1, 12},\n    {1, 11, 0, 13},\n    {0, 20, 0, 13},\n    {2, 11, 0, 14},\n    {1, 12, 0, 14},\n    {41, 1, 1, 15},\n    {42, 1, 1, 15},\n    /* 01111110xxxx00 or 01111110xxxx01 */\n    {22, 1, 1, 10},\n    {23, 1, 1, 10},\n    
{0, 17, 0, 11},\n    {0, 3, 1, 11},\n    {4, 2, 1, 12},\n    {29, 1, 1, 12},\n    {0, 21, 0, 13},\n    {0, 23, 0, 13},\n    {1, 13, 0, 14},\n    {0, 24, 0, 14},\n    {43, 1, 1, 15},\n    {44, 1, 1, 15},\n    /* 011111110xxxx00 or 011111110xxxx01 */\n    {2, 2, 1, 11},\n    {26, 1, 1, 11},\n    {30, 1, 1, 12},\n    {31, 1, 1, 12},\n    {0, 4, 1, 13},\n    {5, 2, 1, 13},\n    {0, 25, 0, 14},\n    {0, 26, 0, 14},\n    /* 0111111110xxxx00 or 0111111110xxxx01 */\n    {32, 1, 1, 12},\n    {33, 1, 1, 12},\n    {6, 2, 1, 13},\n    {7, 2, 1, 13},\n    {0, 5, 1, 14},\n    {1, 4, 1, 14},\n    /* 01111111110xxxx00 or 01111111110xxxx01 */\n    {8, 2, 1, 13},\n    {9, 2, 1, 13},\n    {10, 2, 1, 14},\n    {11, 2, 1, 14},\n    /* 011111111110xxxx00 or 011111111110xxxx01 */\n    {12, 2, 1, 14},\n    {38, 1, 1, 14},\n    /* 1xxxx10 or 1xxxx11 from 11 zeros to 0 zeros*/\n    {0, 1, 0, 3},\n    {0, 2, 0, 3},\n    {0, 3, 0, 4},\n    {0, 1, 1, 4},\n    {1, 1, 1, 5},\n    {2, 1, 1, 5},\n    {5, 1, 1, 6},\n    {6, 1, 1, 6},\n    {10, 1, 1, 7},\n    {11, 1, 1, 7},\n    {13, 1, 1, 8},\n    {14, 1, 1, 8},\n    {19, 1, 1, 9},\n    {20, 1, 1, 9},\n    {24, 1, 1, 10},\n    {25, 1, 1, 10},\n    {27, 1, 1, 11},\n    {28, 1, 1, 11},\n    {34, 1, 1, 12},\n    {35, 1, 1, 12},\n    {36, 1, 1, 13},\n    {37, 1, 1, 13},\n    {39, 1, 1, 14},\n    {40, 1, 1, 14}\n};\n\nconst VLCtab2 RvlcDCTtabInter[170] = /* 00xxxx00 or 00xxxx01 */\n{\n    {27, 255, 0, 5},    /* 0000 is escape code */\n    {0, 2, 0, 4},\n    {0, 3, 0, 5},\n    {3, 1, 0, 5},\n    {1, 2, 0, 6},\n    {6, 1, 0, 6},\n    {0, 4, 0, 7},\n    {2, 2, 0, 7},\n    {0, 5, 0, 8},\n    {0, 6, 0, 8},\n    {0, 7, 0, 9},\n    {1, 4, 0, 9},\n    {0, 8, 0, 10},\n    {0, 9, 0, 10},\n    {0, 10, 0, 11},\n    {0, 11, 0, 11},\n    {0, 12, 0, 12},\n    {1, 7, 0, 12},\n    {0, 13, 0, 13},\n    {0, 14, 0, 13},\n    {0, 17, 0, 14},\n    {0, 18, 0, 14},\n    {0, 19, 0, 15},\n    {3, 7, 0, 15},\n    /* 010xxxx00 or 010xxxx01 */\n    {4, 1, 0, 5},\n    {5, 1, 0, 
5},\n    {7, 1, 0, 6},\n    {8, 1, 0, 6},\n    {9, 1, 0, 7},\n    {10, 1, 0, 7},\n    {1, 3, 0, 8},\n    {3, 2, 0, 8},\n    {2, 3, 0, 9},\n    {5, 2, 0, 9},\n    {1, 5, 0, 10},\n    {3, 3, 0, 10},\n    {1, 6, 0, 11},\n    {2, 4, 0, 11},\n    {2, 5, 0, 12},\n    {3, 4, 0, 12},\n    {0, 15, 0, 13},\n    {0, 16, 0, 13},\n    {1, 9, 0, 14},\n    {1, 10, 0, 14},\n    {4, 5, 0, 15},\n    {7, 4, 0, 15},\n    /* 0110xxxx00 or 0110xxxx01 */\n    {3, 1, 1, 6},\n    {4, 1, 1, 6},\n    {11, 1, 0, 7},\n    {7, 1, 1, 7},\n    {4, 2, 0, 8},\n    {12, 1, 0, 8},\n    {15, 1, 0, 9},\n    {16, 1, 0, 9},\n    {6, 2, 0, 10},\n    {7, 2, 0, 10},\n    {4, 3, 0, 11},\n    {5, 3, 0, 11},\n    {6, 3, 0, 12},\n    {7, 3, 0, 12},\n    {1, 8, 0, 13},\n    {3, 5, 0, 13},\n    {2, 6, 0, 14},\n    {2, 7, 0, 14},\n    {17, 2, 0, 15},\n    {37, 1, 0, 15},\n    /* 01110xxxx00 or 01110xxxx01 */\n    {8, 1, 1, 7},\n    {9, 1, 1, 7},\n    {13, 1, 0, 8},\n    {14, 1, 0, 8},\n    {17, 1, 0, 9},\n    {1, 2, 1, 9},\n    {8, 2, 0, 10},\n    {9, 2, 0, 10},\n    {10, 2, 0, 11},\n    {21, 1, 0, 11},\n    {11, 2, 0, 12},\n    {27, 1, 0, 12},\n    {4, 4, 0, 13},\n    {5, 4, 0, 13},\n    {3, 6, 0, 14},\n    {6, 4, 0, 14},\n    {38, 1, 0, 15},\n    {1, 5, 1, 15},\n    /* 011110xxxx00 or 011110xxxx01 */\n    {0, 2, 1, 8},\n    {12, 1, 1, 8},\n    {15, 1, 1, 9},\n    {16, 1, 1, 9},\n    {18, 1, 0, 10},\n    {19, 1, 0, 10},\n    {22, 1, 0, 11},\n    {23, 1, 0, 11},\n    {28, 1, 0, 12},\n    {29, 1, 0, 12},\n    {8, 3, 0, 13},\n    {12, 2, 0, 13},\n    {9, 3, 0, 14},\n    {13, 2, 0, 14},\n    {2, 3, 1, 15},\n    {13, 2, 1, 15},\n    /* 0111110xxxx00 or 0111110xxxx01 */\n    {17, 1, 1, 9},\n    {18, 1, 1, 9},\n    {20, 1, 0, 10},\n    {21, 1, 1, 10},\n    {24, 1, 0, 11},\n    {25, 1, 0, 11},\n    {1, 3, 1, 12},\n    {3, 2, 1, 12},\n    {30, 1, 0, 13},\n    {31, 1, 0, 13},\n    {14, 2, 0, 14},\n    {15, 2, 0, 14},\n    {41, 1, 1, 15},\n    {42, 1, 1, 15},\n    /* 01111110xxxx00 or 01111110xxxx01 */\n    {22, 1, 1, 
10},\n    {23, 1, 1, 10},\n    {26, 1, 0, 11},\n    {0, 3, 1, 11},\n    {4, 2, 1, 12},\n    {29, 1, 1, 12},\n    {32, 1, 0, 13},\n    {33, 1, 0, 13},\n    {16, 2, 0, 14},\n    {34, 1, 0, 14},\n    {43, 1, 1, 15},\n    {44, 1, 1, 15},\n    /* 011111110xxxx00 or 011111110xxxx01 */\n    {2, 2, 1, 11},\n    {26, 1, 1, 11},\n    {30, 1, 1, 12},\n    {31, 1, 1, 12},\n    {0, 4, 1, 13},\n    {5, 2, 1, 13},\n    {35, 1, 0, 14},\n    {36, 1, 0, 14},\n    /* 0111111110xxxx00 or 0111111110xxxx01 */\n    {32, 1, 1, 12},\n    {33, 1, 1, 12},\n    {6, 2, 1, 13},\n    {7, 2, 1, 13},\n    {0, 5, 1, 14},\n    {1, 4, 1, 14},\n    /* 01111111110xxxx00 or 01111111110xxxx01 */\n    {8, 2, 1, 13},\n    {9, 2, 1, 13},\n    {10, 2, 1, 14},\n    {11, 2, 1, 14},\n    /* 011111111110xxxx00 or 011111111110xxxx01 */\n    {12, 2, 1, 14},\n    {38, 1, 1, 14},\n    /* 1xxxx10 or 1xxxx11 from 11 zeros to 0 zeros*/\n    {0, 1, 0, 3},\n    {1, 1, 0, 3},\n    {2, 1, 0, 4},\n    {0, 1, 1, 4},\n    {1, 1, 1, 5},\n    {2, 1, 1, 5},\n    {5, 1, 1, 6},\n    {6, 1, 1, 6},\n    {10, 1, 1, 7},\n    {11, 1, 1, 7},\n    {13, 1, 1, 8},\n    {14, 1, 1, 8},\n    {19, 1, 1, 9},\n    {20, 1, 1, 9},\n    {24, 1, 1, 10},\n    {25, 1, 1, 10},\n    {27, 1, 1, 11},\n    {28, 1, 1, 11},\n    {34, 1, 1, 12},\n    {35, 1, 1, 12},\n    {36, 1, 1, 13},\n    {37, 1, 1, 13},\n    {39, 1, 1, 14},\n    {40, 1, 1, 14}\n};\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this module\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined 
elsewhere\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Define all local variables\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Function body here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Return nothing or data or data pointer\n----------------------------------------------------------------------------*/\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/vop.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4dec_lib.h\"\n#include \"bitstream.h\"\n#include \"vlc_decode.h\"\n#include \"zigzag.h\"\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n\n#ifdef PV_SUPPORT_MAIN_PROFILE\n/* INTRA */\nconst static int mpeg_iqmat_def[NCOEFF_BLOCK] =\n{\n    8, 17, 18, 19, 21, 23, 25, 27,\n    17, 18, 19, 21, 23, 25, 27, 28,\n    20, 21, 22, 23, 24, 26, 28, 30,\n    21, 22, 23, 24, 26, 28, 30, 32,\n    22, 23, 24, 26, 28, 30, 32, 35,\n    23, 24, 26, 28, 30, 32, 35, 38,\n    25, 26, 28, 30, 32, 35, 38, 41,\n    27, 28, 30, 32, 35, 38, 41, 45\n};\n\n/* INTER */\nconst static int mpeg_nqmat_def[64]  =\n{\n    16, 17, 18, 19, 20, 21, 22, 23,\n    17, 18, 19, 20, 21, 22, 23, 24,\n    18, 19, 20, 21, 22, 23, 24, 25,\n    19, 20, 21, 22, 23, 24, 26, 27,\n    20, 21, 22, 23, 25, 26, 27, 28,\n    21, 22, 23, 24, 26, 27, 28, 30,\n    22, 23, 24, 26, 27, 28, 30, 31,\n    23, 24, 25, 27, 28, 30, 31, 33\n};\n#endif\n\n/* ======================================================================== */\n/*  Function : CalcNumBits()                                                */\n/*  Purpose  :                                                              */\n/*  
In/out   :                                                              */\n/*  Return   : Calculate the minimum number of bits required to             */\n/*              represent x.                                                */\n/*  Note     : This is an equivalent implementation of                      */\n/*                      (long)ceil(log((double)x)/log(2.0))                 */\n/*  Modified :                                                              */\n/* ======================================================================== */\nint CalcNumBits(uint x)\n{\n    int i = 1;\n    while (x >>= 1) i++;\n    return i;\n}\n\n\n\n/***********************************************************CommentBegin******\n*\n* -- DecodeVolHeader -- Decode the header of a VOL\n*\n*   04/10/2000 : initial modification to the new PV-Decoder Lib format.\n*   10/12/2001 : reject non compliant bitstreams\n*\n***********************************************************CommentEnd********/\nPV_STATUS DecodeVOLHeader(VideoDecData *video, int layer)\n{\n    PV_STATUS status;\n    Vol *currVol;\n    BitstreamDecVideo *stream;\n    uint32 tmpvar, vol_shape;\n    uint32 startCode;\n#ifdef PV_SUPPORT_MAIN_PROFILE\n    int *qmat, i, j;\n#endif\n    int version_id = 1;\n#ifdef PV_TOLERATE_VOL_ERRORS\n    uint32 profile = 0x01;\n#endif\n    /*  There's a \"currLayer\" variable inside videoDecData.          */\n    /*   However, we don't maintain it until we decode frame data.  
04/05/2000 */\n    currVol = video->vol[layer];\n    stream  = currVol->bitstream;\n    currVol->moduloTimeBase = 0;\n\n    /* Determine which start code for the decoder to begin with */\n    status = BitstreamShowBits32HC(stream, &startCode);\n\n    if (startCode == VISUAL_OBJECT_SEQUENCE_START_CODE)\n    {   /*  Bitstream Exhchange Fix 9/99 */\n        /* Bitstream Exchange requires we allow start with Video Object Sequence */\n        /* visual_object_sequence_start_code            */\n        (void) BitstreamReadBits32HC(stream);\n        tmpvar = (uint32) BitstreamReadBits16(stream,  8); /* profile */\n#ifndef PV_TOLERATE_VOL_ERRORS\n        if (layer)                                                      /*    */\n        {\n            /* support SSPL0-2  */\n            if (tmpvar != 0x10 && tmpvar != 0x11 && tmpvar != 0x12 &&\n                    tmpvar != 0xA1 && tmpvar != 0xA2  && tmpvar != 0xA3/* Core SP@L1-L3 */)\n                return PV_FAIL;\n        }\n        else\n        {\n            /* support SPL0-3 & SSPL0-2   */\n            if (tmpvar != 0x01 && tmpvar != 0x02 && tmpvar != 0x03 && tmpvar != 0x08 &&\n                    tmpvar != 0x10 && tmpvar != 0x11 && tmpvar != 0x12 &&\n                    tmpvar != 0x21 && tmpvar != 0x22 &&  /* Core Profile Levels */\n                    tmpvar != 0xA1 && tmpvar != 0xA2 && tmpvar != 0xA3 &&\n                    tmpvar != 0xF0 && tmpvar != 0xF1 && /* Advanced Simple Profile Levels*/\n                    tmpvar != 0xF2 && tmpvar != 0xF3 &&\n                    tmpvar != 0xF4 && tmpvar != 0xF5)\n                return PV_FAIL;\n        }\n#else\n        profile = tmpvar;\n#endif\n\n        // save the profile and level for the query\n        currVol->profile_level_id = (uint)tmpvar; //  6/10/04\n\n\n\n        status = BitstreamShowBits32HC(stream, &tmpvar);\n        if (tmpvar == USER_DATA_START_CODE)\n        {\n            /* Something has to be done with user data  11/11/99 */\n            status = 
DecodeUserData(stream);\n            if (status != PV_SUCCESS) return PV_FAIL;\n        }\n        /* visual_object_start_code                     */\n        BitstreamShowBits32HC(stream, &tmpvar);\n        if (tmpvar != VISUAL_OBJECT_START_CODE)\n        {\n            do\n            {\n                /* Search for VOL_HEADER */\n                status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */\n                if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */\n                BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar);\n                PV_BitstreamFlushBits(stream, 8);\n            }\n            while (tmpvar != VOL_START_CODE);\n            goto decode_vol;\n        }\n        else\n        {\n            BitstreamReadBits32HC(stream);\n        }\n\n        /*  is_visual_object_identifier            */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n        if (tmpvar)\n        {\n            /* visual_object_verid                            */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 4);\n            /* visual_object_priority                         */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 3);\n        }\n        /* visual_object_type                                 */\n        BitstreamShowBits32(stream, 4, &tmpvar);\n        if (tmpvar == 1)\n        { /* video_signal_type */\n            PV_BitstreamFlushBits(stream, 4);\n            tmpvar = (uint32) BitstreamRead1Bits(stream);\n            if (tmpvar == 1)\n            {\n                /* video_format */\n                tmpvar = (uint32) BitstreamReadBits16(stream, 3);\n                /* video_range  */\n                tmpvar = (uint32) BitstreamRead1Bits(stream);\n                /* color_description */\n                tmpvar = (uint32) BitstreamRead1Bits(stream);\n                if (tmpvar == 1)\n                {\n                    /* color_primaries */\n                    tmpvar = (uint32) 
BitstreamReadBits16(stream, 8);\n                    /* transfer_characteristics */\n                    tmpvar = (uint32) BitstreamReadBits16(stream, 8);\n                    /* matrix_coefficients */\n                    tmpvar = (uint32) BitstreamReadBits16(stream, 8);\n                }\n            }\n        }\n        else\n        {\n            do\n            {\n                /* Search for VOL_HEADER */\n                status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */\n                if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */\n                BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar);\n                PV_BitstreamFlushBits(stream, 8);\n            }\n            while (tmpvar != VOL_START_CODE);\n            goto decode_vol;\n        }\n\n        /* next_start_code() */\n        status = PV_BitstreamByteAlign(stream);                            /*  10/12/01 */\n        status = BitstreamShowBits32HC(stream, &tmpvar);\n\n        if (tmpvar == USER_DATA_START_CODE)\n        {\n            /* Something has to be done to deal with user data (parse it)  11/11/99 */\n            status = DecodeUserData(stream);\n            if (status != PV_SUCCESS) return PV_FAIL;\n        }\n        status = BitstreamShowBits32(stream, 27, &tmpvar);   /*  10/12/01 */\n    }\n    else\n    {\n        /*      tmpvar = 0;   */                                             /*  10/12/01 */\n        status = BitstreamShowBits32(stream, 27, &tmpvar);     /* uncomment this line if you want\n                                                                     to start decoding with a\n                                                                     video_object_start_code */\n    }\n\n    if (tmpvar == VO_START_CODE)\n    {\n        /*****\n        *\n        *   Read the VOL header entries from the bitstream\n        *\n        *****/\n        /* video_object_start_code                         */\n        tmpvar = 
BitstreamReadBits32(stream, 27);\n        tmpvar = (uint32) BitstreamReadBits16(stream, 5);\n\n\n        /* video_object_layer_start_code                   */\n        BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar);\n        if (tmpvar != VOL_START_CODE)\n        {\n            status = BitstreamCheckEndBuffer(stream);\n            if (status == PV_END_OF_VOP)\n            {\n                video->shortVideoHeader = PV_H263;\n                return PV_SUCCESS;\n            }\n            else\n            {\n                do\n                {\n                    /* Search for VOL_HEADER */\n                    status = PVSearchNextM4VFrame(stream);/* search 0x00 0x00 0x01 */\n                    if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */\n                    BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar);\n                    PV_BitstreamFlushBits(stream, 8); /* advance the byte ptr */\n                }\n                while (tmpvar != VOL_START_CODE);\n            }\n        }\n        else\n        {\n            PV_BitstreamFlushBits(stream, 8);\n        }\n\ndecode_vol:\n        PV_BitstreamFlushBits(stream, VOL_START_CODE_LENGTH - 8);\n        video->shortVideoHeader = 0;\n\n        /* vol_id (4 bits) */\n        currVol->volID = (int) BitstreamReadBits16(stream, 4);\n\n        /* RandomAccessible flag */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n\n        /* object type */\n        tmpvar = (uint32) BitstreamReadBits16(stream, 8);                /*  */\n\n#ifdef PV_TOLERATE_VOL_ERRORS\n        if (tmpvar == 0)\n        {\n            if (layer)                                                      /*    */\n            {\n                /* support SSPL0-2  */\n                if (profile != 0x10 && profile != 0x11 && profile != 0x12)\n                    return PV_FAIL;\n                tmpvar = 0x02;\n            }\n            else\n            {\n                /* support SPL0-3 & SSPL0-2 
  */\n                if (profile != 0x01 && profile != 0x02 && profile != 0x03 && profile != 0x08 &&\n                        profile != 0x10 && profile != 0x11 && profile != 0x12)\n                    return PV_FAIL;\n                tmpvar = 0x01;\n            }\n            profile |= 0x0100;\n        }\n#endif\n\n        if (layer)\n        {\n            if (tmpvar != 0x02) return PV_FAIL;\n        }\n        else\n        {\n            if (tmpvar != 0x01) return PV_FAIL;\n        }\n\n        /* version id specified? */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n        if (tmpvar == 1)\n        {\n            /* version ID */\n            version_id = (uint32) BitstreamReadBits16(stream, 4);\n            /* priority */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 3);\n\n        }\n\n        /* aspect ratio info */\n        tmpvar = (uint32) BitstreamReadBits16(stream, 4);\n//      Commenting out PV_FAIL return in aspect ratio info is 0. Don't think there is bitstream corruption.\n//      It's just bad encoding. We can make a change to our decoder to ignore this type of encoding flaw.\n//      if (tmpvar == 0) return PV_FAIL;\n        if (tmpvar == 0xf /* extended_par */)\n        {\n            /* width */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 8);\n            /* height */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 8);\n        }\n\n\n        /* control parameters present? */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n\n        /*  Get the parameters (skipped) */\n        /*  03/10/99 */\n        if (tmpvar)\n        {\n            /* chroma_format                    */\n            tmpvar = BitstreamReadBits16(stream, 2);\n            if (tmpvar != 1) return PV_FAIL;\n            /* low_delay  */\n            tmpvar = BitstreamRead1Bits(stream);\n\n            /* vbv_parameters present? 
*/\n            tmpvar = (uint32) BitstreamRead1Bits(stream);\n            if (tmpvar)\n            {\n                /* first_half_bit_rate    */\n                BitstreamReadBits16(stream, 15);\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n                /* latter_half_bit_rate   */\n                BitstreamReadBits16(stream, 15);\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n                /* first_half_vbv_buffer_size   */\n                BitstreamReadBits16(stream, 15);\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n                /* latter_half_vbv_buffer_size   */\n                BitstreamReadBits16(stream,  3);\n                /* first_half_vbv_occupancy     */\n                BitstreamReadBits16(stream, 11);\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n                /* latter_half_vbv_occupancy  */\n                BitstreamReadBits16(stream, 15);\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n            }\n        }\n\n        /* video_object_layer_shape (2 bits), only 00 (rect) is supported for now */\n        vol_shape = (uint32) BitstreamReadBits16(stream, 2);\n        if (vol_shape) return PV_FAIL;\n\n        /* marker bit,  03/10/99 */\n        if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n        /* vop_time_increment_resolution   */\n        currVol->timeIncrementResolution = BitstreamReadBits16(stream, 16);\n        if (currVol->timeIncrementResolution == 0) return PV_FAIL;\n\n        /* . since nbitsTimeIncRes will be used over and over again, */\n        /*    we should put it in Vol structure.  04/12/2000.          
*/\n        currVol->nbitsTimeIncRes = CalcNumBits((uint)currVol->timeIncrementResolution - 1);\n\n        if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n        /* fixed_vop_rate */\n        currVol->fixedVopRate = (int) BitstreamRead1Bits(stream);\n        if (currVol->fixedVopRate)\n        {\n            /* fixed_vop_time_increment */\n            tmpvar = BitstreamReadBits16(stream, currVol->nbitsTimeIncRes);\n        }\n\n        /* marker bit */\n        if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n        /* video_object_layer_width (13 bits) */\n        video->displayWidth = video->width = (int) BitstreamReadBits16(stream, 13);\n\n        /* round up to a multiple of MB_SIZE.   08/09/2000 */\n        video->width = (video->width + 15) & -16;\n//      video->displayWidth += (video->displayWidth & 0x1);  /* displayed image should be even size */\n\n        /* marker bit */\n        if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n        /* video_object_layer_height (13 bits) */\n        video->displayHeight = video->height = (int) BitstreamReadBits16(stream, 13);\n\n        /* round up to a multiple of MB_SIZE.   
08/09/2000 */\n        video->height = (video->height + 15) & -16;\n//      video->displayHeight += (video->displayHeight & 0x1); /* displayed image should be even size */\n        if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n        /*  03/10/99 */\n        /* interlaced */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n        if (tmpvar != 0)\n        {\n            mp4dec_log(\"DecodeVOLHeader(): Interlaced video is not supported.\\n\");\n            return PV_FAIL;\n        }\n\n        /* obmc_disable */\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n        if (tmpvar == 0) return PV_FAIL;\n\n        if (version_id == 1)\n        {\n            /*  sprite_enable (1 bits) */\n            tmpvar = (uint32) BitstreamRead1Bits(stream);\n            if (tmpvar)\n            {\n                mp4dec_log(\"DecodeVOLHeader(): Sprite is not supported.\\n\");\n                return PV_FAIL;\n            }\n        }\n        else\n        {\n            /* For version 2, vol_sprite_usage has two bits. 
*/\n            /* sprite_enable */\n            tmpvar = (uint32) BitstreamReadBits16(stream, 2);\n            if (tmpvar)\n            {\n                mp4dec_log(\"DecodeVOLHeader(): Sprite is not supported.\\n\");\n                return PV_FAIL;\n            }\n        }\n\n        /* not_8_bit */\n        if (BitstreamRead1Bits(stream))\n        {\n            /* quant_precision */\n            currVol->quantPrecision = BitstreamReadBits16(stream, 4);\n            /* bits_per_pixel  */\n            currVol->bitsPerPixel = BitstreamReadBits16(stream, 4);\n            mp4dec_log(\"DecodeVOLHeader(): not an 8-bit stream.\\n\");    // For the time being we do not support != 8 bits\n\n            return PV_FAIL;\n        }\n        else\n        {\n            currVol->quantPrecision = 5;\n            currVol->bitsPerPixel = 8;\n        }\n\n        /* quant_type (1 bit) */\n        currVol->quantType = BitstreamRead1Bits(stream);\n        if (currVol->quantType)\n        {\n#ifdef PV_SUPPORT_MAIN_PROFILE\n            /* load quantization matrices.   
5/22/2000 */\n            /* load_intra_quant_mat (1 bit) */\n            qmat = currVol->iqmat;\n            currVol->loadIntraQuantMat = BitstreamRead1Bits(stream);\n            if (currVol->loadIntraQuantMat)\n            {\n                /* intra_quant_mat (8*64 bits) */\n                i = 0;\n                do\n                {\n                    qmat[*(zigzag_inv+i)] = (int) BitstreamReadBits16(stream, 8);\n                }\n                while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64));\n\n                for (j = i; j < 64; j++)\n                    qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)];\n            }\n            else\n            {\n                oscl_memcpy(qmat, mpeg_iqmat_def, 64*sizeof(int));\n            }\n\n            qmat[0] = 0;             /* necessary for switched && MPEG quant  07/09/01 */\n\n            /* load_nonintra_quant_mat (1 bit) */\n            qmat = currVol->niqmat;\n            currVol->loadNonIntraQuantMat = BitstreamRead1Bits(stream);\n            if (currVol->loadNonIntraQuantMat)\n            {\n                /* nonintra_quant_mat (8*64 bits) */\n                i = 0;\n                do\n                {\n                    qmat[*(zigzag_inv+i)] = (int) BitstreamReadBits16(stream, 8);\n                }\n                while ((qmat[*(zigzag_inv+i)] != 0) && (++i < 64));\n\n                for (j = i; j < 64; j++)\n                    qmat[*(zigzag_inv+j)] = qmat[*(zigzag_inv+i-1)];\n            }\n            else\n            {\n                oscl_memcpy(qmat, mpeg_nqmat_def, 64*sizeof(int));\n            }\n#else\n            return PV_FAIL;\n#endif\n        }\n\n        if (version_id != 1)\n        {\n            /* quarter_sample enabled */\n            tmpvar = BitstreamRead1Bits(stream);\n            if (tmpvar) return PV_FAIL;\n        }\n\n        /* complexity_estimation_disable */\n        currVol->complexity_estDisable = BitstreamRead1Bits(stream);\n        if 
(currVol->complexity_estDisable == 0)\n        {\n            currVol->complexity_estMethod = BitstreamReadBits16(stream, 2);\n\n            if (currVol->complexity_estMethod < 2)\n            {\n                /* shape_complexity_estimation_disable */\n                tmpvar = BitstreamRead1Bits(stream);\n                if (tmpvar == 0)\n                {\n                    mp4dec_log(\"DecodeVOLHeader(): Shape Complexity estimation is not supported.\\n\");\n                    return PV_FAIL;\n                }\n                /* texture_complexity_estimation_set_1_disable */\n                tmpvar = BitstreamRead1Bits(stream);\n                if (tmpvar == 0)\n                {\n                    currVol->complexity.text_1 = BitstreamReadBits16(stream, 4);\n                }\n                /* marker bit */\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n                /* texture_complexity_estimation_set_2_disable */\n                tmpvar = BitstreamRead1Bits(stream);\n                if (tmpvar == 0)\n                {\n                    currVol->complexity.text_2 = BitstreamReadBits16(stream, 4);\n                }\n                /* motion_compensation_complexity_disable */\n                tmpvar = BitstreamRead1Bits(stream);\n                if (tmpvar == 0)\n                {\n                    currVol->complexity.mc = BitstreamReadBits16(stream, 6);\n                }\n                /* marker bit */\n                if (!BitstreamRead1Bits(stream)) return PV_FAIL;\n\n                if (currVol->complexity_estMethod == 1)\n                {   /* version2_complexity_estimation_disable */\n                    tmpvar = BitstreamRead1Bits(stream);\n                    if (tmpvar == 0)\n                    {\n                        mp4dec_log(\"DecodeVOLHeader(): sadct, quarter pel not supported.\\n\");\n                        return PV_FAIL;\n                    }\n                }\n            }\n        }\n\n       
 /*  03/10/99 */\n        /* resync_marker_disable */\n        currVol->errorResDisable = (int) BitstreamRead1Bits(stream);\n        /* data_partititioned    */\n        currVol->dataPartitioning = (int) BitstreamRead1Bits(stream);\n\n        video->vlcDecCoeffIntra = &VlcDecTCOEFIntra;\n        video->vlcDecCoeffInter = &VlcDecTCOEFInter;\n\n        if (currVol->dataPartitioning)\n        {\n            if (layer) return PV_FAIL;                              /*  */\n            /* reversible_vlc */\n            currVol->useReverseVLC = (int)BitstreamRead1Bits(stream);\n            if (currVol->useReverseVLC)\n            {\n                video->vlcDecCoeffIntra = &RvlcDecTCOEFIntra;\n                video->vlcDecCoeffInter = &RvlcDecTCOEFInter;\n            }\n            currVol->errorResDisable = 0;\n        }\n        else\n        {\n            currVol->useReverseVLC = 0;\n        }\n\n        if (version_id != 1)\n        {\n            /* newpred_enable */\n            tmpvar = BitstreamRead1Bits(stream);\n            if (tmpvar) return PV_FAIL;\n\n            /* reduced_resolution_vop */\n            tmpvar = BitstreamRead1Bits(stream);\n            if (tmpvar) return PV_FAIL;\n\n        }\n\n        /* Intra AC/DC prediction is always true */\n        video->intra_acdcPredDisable = 0;\n        /* scalability */\n        currVol->scalability = (int) BitstreamRead1Bits(stream);\n\n        if (currVol->scalability)\n        {\n            if (layer == 0)  return PV_FAIL;                     /*  */\n            /* hierarchy_type: 1 : temporal, 0 : spatial */\n            /*  03/10/99 */\n            currVol->scalType = (int) BitstreamRead1Bits(stream);              /*  */\n            if (!currVol->scalType) return PV_FAIL;\n\n            /* ref_layer_id (4 bits) */\n            currVol->refVolID = (int) BitstreamReadBits16(stream, 4);\n            if (layer)                                                      /*  */\n            {\n                if 
(currVol->refVolID != video->vol[0]->volID) return PV_FAIL;\n            }\n            /* ref_layer_sampling_direc (1 bits)              */\n            /*   1 : ref. layer has higher resolution         */\n            /*   0 : ref. layer has equal or lower resolution */\n            currVol->refSampDir = (int) BitstreamRead1Bits(stream);\n            if (currVol->refSampDir) return PV_FAIL;\n\n            /* hor_sampling_factor_n (5 bits) */\n            currVol->horSamp_n = (int) BitstreamReadBits16(stream, 5);\n\n            /* hor_sampling_factor_m (5 bits) */\n            currVol->horSamp_m = (int) BitstreamReadBits16(stream, 5);\n\n            if (currVol->horSamp_m == 0) return PV_FAIL;\n            if (currVol->horSamp_n != currVol->horSamp_m) return PV_FAIL;\n\n            /* ver_sampling_factor_n (5 bits) */\n            currVol->verSamp_n = (int) BitstreamReadBits16(stream, 5);\n\n            /* ver_sampling_factor_m (5 bits) */\n            currVol->verSamp_m = (int) BitstreamReadBits16(stream, 5);\n\n            if (currVol->verSamp_m == 0) return PV_FAIL;\n            if (currVol->verSamp_n != currVol->verSamp_m) return PV_FAIL;\n\n\n            /* enhancement_type: 1 : partial region, 0 : full region */\n            /* 04/10/2000: we only support full region enhancement layer. */\n            if (BitstreamRead1Bits(stream)) return PV_FAIL;\n        }\n\n        PV_BitstreamByteAlign(stream);\n\n        status = BitstreamShowBits32HC(stream, &tmpvar);\n\n        /* if we hit the end of buffer, tmpvar == 0.   
08/30/2000 */\n        if (tmpvar == USER_DATA_START_CODE)\n        {\n            status = DecodeUserData(stream);\n            /* you should not check for status here  03/19/2002 */\n            status = PV_SUCCESS;\n        }\n\n        /* Compute some convenience variables:   04/13/2000 */\n        video->nMBPerRow = video->width / MB_SIZE;\n        video->nMBPerCol = video->height / MB_SIZE;\n        video->nTotalMB = video->nMBPerRow * video->nMBPerCol;\n        video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1);\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        video->modified_quant = 0;\n        video->advanced_INTRA = 0;\n        video->deblocking = 0;\n        video->slice_structure = 0;\n#endif\n    }\n    else\n    {\n        /* SHORT_HEADER */\n        status = BitstreamShowBits32(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar);\n\n        if (tmpvar == SHORT_VIDEO_START_MARKER)\n        {\n            video->shortVideoHeader = PV_H263;\n        }\n        else\n        {\n            do\n            {\n                /* Search for VOL_HEADER */\n                status = PVSearchNextM4VFrame(stream); /* search 0x00 0x00 0x01 */\n                if (status != PV_SUCCESS) return PV_FAIL; /* breaks the loop */\n                BitstreamShowBits32(stream, VOL_START_CODE_LENGTH, &tmpvar);\n                PV_BitstreamFlushBits(stream, 8);\n            }\n            while (tmpvar != VOL_START_CODE);\n            goto decode_vol;\n        }\n    }\n#ifdef PV_TOLERATE_VOL_ERRORS\n    if (profile > 0xFF || profile == 0)\n    {\n        return PV_BAD_VOLHEADER;\n    }\n#endif\n\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- DecodeGOV -- Decodes the Group of VOPs from bitstream\n*\n*   04/20/2000  initial modification to the new PV-Decoder Lib format.\n*\n***********************************************************CommentEnd********/\nPV_STATUS DecodeGOVHeader(BitstreamDecVideo *stream, uint32 
*time_base)\n{\n    uint32 tmpvar, time_s;\n    int closed_gov, broken_link;\n\n    /* group_start_code (32 bits) */\n//   tmpvar = BitstreamReadBits32(stream, 32);\n\n    /* hours */\n    tmpvar = (uint32) BitstreamReadBits16(stream, 5);\n    time_s = tmpvar * 3600;\n\n    /* minutes */\n    tmpvar = (uint32) BitstreamReadBits16(stream, 6);\n    time_s += tmpvar * 60;\n\n    /* marker bit */\n    tmpvar = (uint32) BitstreamRead1Bits(stream);\n\n    /* seconds */\n    tmpvar = (uint32) BitstreamReadBits16(stream, 6);\n    time_s += tmpvar;\n\n    /* We have to check the timestamp here.  If the sync timestamp is */\n    /*    earlier than the previous timestamp or longer than 60 sec.  */\n    /*    after the previous timestamp, assume the GOV header is      */\n    /*    corrupted.                                 05/12/2000     */\n    *time_base = time_s;   /*  02/27/2002 */\n//  *time_base = *time_base/1000;\n//  tmpvar = time_s - *time_base;\n//  if (tmpvar <= 60) *time_base = time_s;\n//  else return PV_FAIL;\n\n    tmpvar = (uint32) BitstreamRead1Bits(stream);\n    closed_gov = tmpvar;\n    tmpvar = (uint32) BitstreamRead1Bits(stream);\n    broken_link = tmpvar;\n\n    if ((closed_gov == 0) && (broken_link == 1))\n    {\n        return PV_SUCCESS;        /*  03/15/2002  you can also return PV_FAIL */\n    }\n\n    PV_BitstreamByteAlign(stream);\n\n    BitstreamShowBits32HC(stream, &tmpvar);\n\n    while (tmpvar == USER_DATA_START_CODE)       /*  03/15/2002 */\n    {\n        DecodeUserData(stream);\n        BitstreamShowBits32HC(stream, &tmpvar);\n    }\n\n    return PV_SUCCESS;\n}\n\n/***********************************************************CommentBegin******\n*\n* -- DecodeVopHeader -- Decodes the VOPheader information from the bitstream\n*\n*   04/12/2000  Initial port to the new PV decoder library format.\n*   05/10/2000  Error resilient decoding of vop header.\n*\n***********************************************************CommentEnd********/\nPV_STATUS 
DecodeVOPHeader(VideoDecData *video, Vop *currVop, Bool use_ext_timestamp)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[video->currLayer];\n    BitstreamDecVideo *stream = currVol->bitstream;\n    uint32 tmpvar;\n    int time_base;\n\n    /*****\n    *   Read the VOP header from the bitstream (No shortVideoHeader Mode here!)\n    *****/\n    BitstreamShowBits32HC(stream, &tmpvar);\n\n    /* check if we have a GOV header here.   08/30/2000 */\n    if (tmpvar == GROUP_START_CODE)\n    {\n        tmpvar = BitstreamReadBits32HC(stream);\n//      rewindBitstream(stream, START_CODE_LENGTH); /* for backward compatibility */\n        status = DecodeGOVHeader(stream, &tmpvar);\n        if (status != PV_SUCCESS)\n        {\n            return status;\n        }\n//      use_ext_timestamp = TRUE;   /*  02/08/2002 */\n        /* We should have a VOP header following the GOV header.  03/15/2001 */\n        BitstreamShowBits32HC(stream, &tmpvar);\n    }\n#ifdef PV_SUPPORT_TEMPORAL_SCALABILITY\n    currVop->timeStamp = -1;\n#endif\n    if (tmpvar == VOP_START_CODE)\n    {\n        tmpvar = BitstreamReadBits32HC(stream);\n    }\n    else\n    {\n        PV_BitstreamFlushBits(stream, 8); // advance by a byte\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n\n\n    /* vop_prediction_type (2 bits) */\n    currVop->predictionType = (int) BitstreamReadBits16(stream, 2);\n\n    /* modulo_time_base (? 
bits) */\n    time_base = -1;\n    do\n    {\n        time_base++;\n        tmpvar = (uint32) BitstreamRead1Bits(stream);\n    }\n    while (tmpvar == 1);\n\n\n\n    if (!use_ext_timestamp)\n    {\n        currVol->moduloTimeBase += 1000 * time_base; /* milliseconds based MTB  11/12/01 */\n    }\n\n    /* marker_bit (1 bit) */\n    if (!BitstreamRead1Bits(stream))\n    {\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n    /* vop_time_increment (1-15 bits) in Nov_Compliant (1-16 bits) */\n    /*    we always assumes fixed vop rate here */\n    currVop->timeInc = BitstreamReadBits16(stream, currVol->nbitsTimeIncRes);\n\n\n    /* marker_bit (1 bit) */\n    if (!BitstreamRead1Bits(stream))\n    {\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n    /* vop_coded */\n    currVop->vopCoded = (int) BitstreamRead1Bits(stream);\n\n\n    if (currVop->vopCoded == 0)\n    {\n        status = PV_SUCCESS;\n        goto return_point;\n    }\n\n\n    /* read vop_rounding_type */\n    if (currVop->predictionType == P_VOP)\n    {\n        currVop->roundingType = (int) BitstreamRead1Bits(stream);\n    }\n    else\n    {\n        currVop->roundingType = 0;\n    }\n\n    if (currVol->complexity_estDisable == 0)\n    {\n        if (currVol->complexity_estMethod < 2)   /*   OCT 2002 */\n        {\n            if ((currVol->complexity.text_1 >> 3) & 0x1)    /* intra        */\n                BitstreamReadBits16(stream, 8);\n            if (currVol->complexity.text_1 & 0x1)           /* not_coded    */\n                BitstreamReadBits16(stream, 8);\n            if ((currVol->complexity.text_2 >> 3) & 0x1)    /* dct_coefs    */\n                BitstreamReadBits16(stream, 8);\n            if ((currVol->complexity.text_2 >> 2) & 0x1)    /* dct_lines    */\n                BitstreamReadBits16(stream, 8);\n            if ((currVol->complexity.text_2 >> 1) & 0x1)    /* vlc_symbols  */\n                BitstreamReadBits16(stream, 8);\n            if 
(currVol->complexity.text_2 & 0x1)           /* vlc_bits     */\n                BitstreamReadBits16(stream, 4);\n\n            if (currVop->predictionType != I_VOP)\n            {\n                if ((currVol->complexity.text_1 >> 2) & 0x1)    /* inter    */\n                    BitstreamReadBits16(stream, 8);\n                if ((currVol->complexity.text_1 >> 1) & 0x1)    /* inter_4v */\n                    BitstreamReadBits16(stream, 8);\n                if ((currVol->complexity.mc >> 5) & 0x1)        /* apm      */\n                    BitstreamReadBits16(stream, 8);\n                if ((currVol->complexity.mc >> 4) & 0x1)        /* npm      */\n                    BitstreamReadBits16(stream, 8);\n                /* interpolate_mc_q */\n                if ((currVol->complexity.mc >> 2) & 0x1)        /* forw_back_mc_q */\n                    BitstreamReadBits16(stream, 8);\n                if ((currVol->complexity.mc >> 1) & 0x1)        /* halfpel2 */\n                    BitstreamReadBits16(stream, 8);\n                if (currVol->complexity.mc & 0x1)               /* halfpel4 */\n                    BitstreamReadBits16(stream, 8);\n            }\n            if (currVop->predictionType == B_VOP)\n            {\n                if ((currVol->complexity.mc >> 3) & 0x1)        /* interpolate_mc_q */\n                    BitstreamReadBits16(stream, 8);\n            }\n        }\n    }\n\n    /* read intra_dc_vlc_thr */\n    currVop->intraDCVlcThr = (int) BitstreamReadBits16(stream, 3);\n\n    /* read vop_quant (currVol->quantPrecision bits) */\n    currVop->quantizer = (int16) BitstreamReadBits16(stream, currVol->quantPrecision);\n    if (currVop->quantizer == 0)\n    {\n        currVop->quantizer = video->prevVop->quantizer;\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n\n    /* read vop_fcode_forward */\n    if (currVop->predictionType != I_VOP)\n    {\n        tmpvar = (uint32) BitstreamReadBits16(stream, 3);\n        if (tmpvar < 1)\n    
    {\n            currVop->fcodeForward = 1;\n            status = PV_FAIL;\n            goto return_point;\n        }\n        currVop->fcodeForward = tmpvar;\n    }\n    else\n    {\n        currVop->fcodeForward = 0;\n    }\n\n    /* read vop_fcode_backward */\n    if (currVop->predictionType == B_VOP)\n    {\n        return PV_FAIL; // return this for now.\n    }\n    else\n    {\n        currVop->fcodeBackward = 0;\n    }\n\n    if (currVol->scalability)\n    {\n        currVop->refSelectCode = (int) BitstreamReadBits16(stream, 2);\n    }\n\nreturn_point:\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- VideoPlaneWithShortHeader -- Decodes the short_video_header information from the bitstream\n* Modified :\n             04/23/2001.  Remove the codes related to the\n                 \"first pass\" decoding.  We use a different function\n                 to set up the decoder now.\n***********************************************************CommentEnd********/\nPV_STATUS DecodeShortHeader(VideoDecData *video, Vop *currVop)\n{\n    PV_STATUS status = PV_SUCCESS;\n    {\n        status = DecodeH263Header(video, currVop);\n    }\n    return status;\n}\n\n\nPV_STATUS DecodeH263Header(VideoDecData *video, Vop *currVop)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[0];\n    BitstreamDecVideo *stream = currVol->bitstream;\n    uint32 tmpvar;\n    int32 size;\n\n    int extended_PTYPE = FALSE;\n    int UFEP = 0, custom_PFMT = 0, custom_PCF = 0;\n\n    status = BitstreamShowBits32(stream, SHORT_VIDEO_START_MARKER_LENGTH, &tmpvar);\n\n    if (tmpvar !=  SHORT_VIDEO_START_MARKER)\n    {\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n    }\n\n\n    PV_BitstreamFlushBits(stream, SHORT_VIDEO_START_MARKER_LENGTH);\n\n    /* Temporal reference. 
Using vop_time_increment_resolution = 30000 */\n    tmpvar = (uint32) BitstreamReadBits16(stream, 8);\n    currVop->temporalRef = (int) tmpvar;\n\n\n    currVop->timeInc = 0xff & (256 + currVop->temporalRef - video->prevVop->temporalRef);\n    currVol->moduloTimeBase += currVop->timeInc; /* mseconds   11/12/01 */\n    /* Marker Bit */\n    if (!BitstreamRead1Bits(stream))\n    {\n        mp4dec_log(\"DecodeShortHeader(): Market bit wrong.\\n\");\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n    /* Zero Bit */\n    if (BitstreamRead1Bits(stream))\n    {\n        mp4dec_log(\"DecodeShortHeader(): Zero bit wrong.\\n\");\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n    /*split_screen_indicator*/\n    if (BitstreamRead1Bits(stream))\n    {\n        mp4dec_log(\"DecodeShortHeader(): Split Screen not supported.\\n\");\n        VideoDecoderErrorDetected(video);\n    }\n\n    /*document_freeze_camera*/\n    if (BitstreamRead1Bits(stream))\n    {\n        mp4dec_log(\"DecodeShortHeader(): Freeze Camera not supported.\\n\");\n        VideoDecoderErrorDetected(video);\n    }\n\n    /*freeze_picture_release*/\n    if (BitstreamRead1Bits(stream))\n    {\n        mp4dec_log(\"DecodeShortHeader(): Freeze Release not supported.\\n\");\n        VideoDecoderErrorDetected(video);\n    }\n    /* source format */\n    switch (BitstreamReadBits16(stream, 3))\n    {\n        case 1:\n            video->displayWidth = video->width =  128;\n            video->displayHeight = video->height  = 96;\n            break;\n\n        case 2:\n            video->displayWidth = video->width  = 176;\n            video->displayHeight = video->height  = 144;\n            break;\n\n        case 3:\n            video->displayWidth = video->width = 352;\n            video->displayHeight = video->height = 288;\n            break;\n\n        case 4:\n            video->displayWidth = video->width = 704;\n            video->displayHeight = video->height = 576;\n          
  break;\n\n        case 5:\n            video->displayWidth = video->width = 1408;\n            video->displayHeight = video->height = 1152;\n            break;\n\n        case 7:\n            extended_PTYPE = TRUE;\n            break;\n\n        default:\n            /* Msg(\"H.263 source format not legal\\n\"); */\n            status = PV_FAIL;\n            goto return_point;\n    }\n\n\n    currVop->roundingType = 0;\n\n    if (extended_PTYPE == FALSE)\n    {\n        currVop->predictionType = (int) BitstreamRead1Bits(stream);\n\n        /* four_reserved_zero_bits */\n        if (BitstreamReadBits16(stream, 4))\n        {\n            mp4dec_log(\"DecodeShortHeader(): Reserved bits wrong.\\n\");\n            status = PV_FAIL;\n            goto return_point;\n        }\n    }\n    else\n    {\n        UFEP = BitstreamReadBits16(stream, 3);\n        if (UFEP == 1)\n        {\n            /* source format */\n            switch (BitstreamReadBits16(stream, 3))\n            {\n                case 1:\n                    video->displayWidth = video->width =  128;\n                    video->displayHeight = video->height  = 96;\n                    break;\n\n                case 2:\n                    video->displayWidth = video->width  = 176;\n                    video->displayHeight = video->height  = 144;\n                    break;\n\n                case 3:\n                    video->displayWidth = video->width = 352;\n                    video->displayHeight = video->height = 288;\n                    break;\n\n                case 4:\n                    video->displayWidth = video->width = 704;\n                    video->displayHeight = video->height = 576;\n                    break;\n\n                case 5:\n                    video->displayWidth = video->width = 1408;\n                    video->displayHeight = video->height = 1152;\n                    break;\n\n                case 6:\n                    custom_PFMT = TRUE;\n                    
break;\n\n                default:\n                    /* Msg(\"H.263 source format not legal\\n\"); */\n                    status = PV_FAIL;\n                    goto return_point;\n            }\n\n            custom_PCF = BitstreamRead1Bits(stream);\n            /* unrestricted MV */\n            if (BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            /* SAC */\n            if (BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n\n            /* AP */\n            if (BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n\n            video->advanced_INTRA = BitstreamRead1Bits(stream);\n\n            video->deblocking = BitstreamRead1Bits(stream);\n\n            video->slice_structure = BitstreamRead1Bits(stream);\n\n            /* RPS, ISD, AIV */\n            if (BitstreamReadBits16(stream, 3))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            video->modified_quant = BitstreamRead1Bits(stream);\n\n            /* Marker Bit and reserved*/\n            if (BitstreamReadBits16(stream, 4) != 8)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n        }\n#ifndef PV_ANNEX_IJKT_SUPPORT\n        if (video->advanced_INTRA | video->deblocking | video->modified_quant | video->modified_quant)\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n#endif\n\n        if (UFEP == 0 || UFEP == 1)\n        {\n            tmpvar = BitstreamReadBits16(stream, 3);\n            if (tmpvar > 1)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            currVop->predictionType = tmpvar;\n            /* RPR */\n            if 
(BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n\n            /* RRU */\n            if (BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            currVop->roundingType = (int) BitstreamRead1Bits(stream);\n            if (BitstreamReadBits16(stream, 3) != 1)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n        }\n        else\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n        /* CPM */\n        if (BitstreamRead1Bits(stream))\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n        /* CPFMT */\n        if (custom_PFMT == 1 && UFEP == 1)\n        {\n            /* aspect ratio */\n            tmpvar = BitstreamReadBits16(stream, 4);\n            if (tmpvar == 0)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            /* Extended PAR */\n            if (tmpvar == 0xF)\n            {\n                /* Read par_width and par_height but do nothing */\n                /* par_width */\n                tmpvar = BitstreamReadBits16(stream, 8);\n\n                /* par_height */\n                tmpvar = BitstreamReadBits16(stream, 8);\n            }\n            tmpvar = BitstreamReadBits16(stream, 9);\n\n            video->displayWidth = (tmpvar + 1) << 2;\n            video->width = (video->displayWidth + 15) & -16;\n            /* marker bit */\n            if (!BitstreamRead1Bits(stream))\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            tmpvar = BitstreamReadBits16(stream, 9);\n            if (tmpvar == 0)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n            video->displayHeight = tmpvar 
<< 2;\n            video->height = (video->displayHeight + 15) & -16;\n\n            video->nTotalMB = video->width / MB_SIZE * video->height / MB_SIZE;\n\n            if (video->nTotalMB <= 48)\n            {\n                video->nBitsForMBID = 6;\n            }\n            else if (video->nTotalMB <= 99)\n            {\n                video->nBitsForMBID = 7;\n            }\n            else if (video->nTotalMB <= 396)\n            {\n                video->nBitsForMBID = 9;\n            }\n            else if (video->nTotalMB <= 1584)\n            {\n                video->nBitsForMBID = 11;\n            }\n            else if (video->nTotalMB <= 6336)\n            {\n                video->nBitsForMBID = 13 ;\n            }\n            else if (video->nTotalMB <= 9216)\n            {\n                video->nBitsForMBID = 14 ;\n            }\n            else\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n        }\n        if (UFEP == 1 && custom_PCF == 1)\n        {\n            BitstreamRead1Bits(stream);\n\n            tmpvar = BitstreamReadBits16(stream, 7);\n            if (tmpvar == 0)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n        }\n\n        if (custom_PCF == 1)\n        {\n            currVop->ETR = BitstreamReadBits16(stream, 2);\n        }\n\n        if (UFEP == 1 && video->slice_structure == 1)\n        {\n            /* SSS */\n            tmpvar = BitstreamReadBits16(stream, 2);\n            if (tmpvar != 0)\n            {\n                status = PV_FAIL;\n                goto return_point;\n            }\n        }\n    }\n\n    /* Recalculate number of macroblocks per row & col since */\n    /*  the frame size can change.           04/23/2001.   
*/\n    video->nMBinGOB = video->nMBPerRow = video->width / MB_SIZE;\n    video->nGOBinVop = video->nMBPerCol = video->height / MB_SIZE;\n    video->nTotalMB = video->nMBPerRow * video->nMBPerCol;\n    if (custom_PFMT == 0  || UFEP == 0)\n    {\n        video->nBitsForMBID = CalcNumBits((uint)video->nTotalMB - 1); /* otherwise calculate above */\n    }\n    size = (int32)video->width * video->height;\n    if ((size > video->size) &&\n            ((video->currVop->predictionType == P_VOP) || (video->initialized == PV_TRUE)))\n    {\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n#ifdef PV_MEMORY_POOL\n    video->videoDecControls->size = size;\n#endif\n\n    video->size = size;\n    video->currVop->uChan = video->currVop->yChan + size;\n    video->currVop->vChan = video->currVop->uChan + (size >> 2);\n    video->prevVop->uChan = video->prevVop->yChan + size;\n    video->prevVop->vChan = video->prevVop->uChan + (size >> 2);\n\n\n    currVop->quantizer = (int16) BitstreamReadBits16(stream, 5);\n\n    if (currVop->quantizer == 0)                          /*  04/03/01 */\n    {\n        currVop->quantizer = video->prevVop->quantizer;\n        status = PV_FAIL;\n        goto return_point;\n    }\n\n\n    /* Zero bit */\n    if (extended_PTYPE == FALSE)\n    {\n        if (BitstreamRead1Bits(stream))\n        {\n            mp4dec_log(\"DecodeShortHeader(): Zero bit wrong.\\n\");\n            status = PV_FAIL;\n            goto return_point;\n        }\n    }\n    /* pei */\n    tmpvar = (uint32) BitstreamRead1Bits(stream);\n\n    while (tmpvar)\n    {\n        tmpvar = (uint32) BitstreamReadBits16(stream, 8); /* \"PSPARE\" */\n        tmpvar = (uint32) BitstreamRead1Bits(stream); /* \"PEI\" */\n    }\n\n    if (video->slice_structure)  /* ANNEX_K */\n    {\n        if (!BitstreamRead1Bits(stream))  /* SEPB1 */\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n\n        //  if (currVol->nBitsForMBID //\n        if 
(BitstreamReadBits16(stream, video->nBitsForMBID))\n        {\n            status = PV_FAIL;             /* no ASO, RS support for Annex K */\n            goto return_point;\n        }\n\n        if (!BitstreamRead1Bits(stream))  /*SEPB3 */\n        {\n            status = PV_FAIL;\n            goto return_point;\n        }\n\n    }\n    /* Setting of other VOP-header parameters */\n    currVop->gobNumber = 0;\n    currVop->vopCoded = 1;\n\n    currVop->intraDCVlcThr = 0;\n    currVop->gobFrameID = 0; /* initial value,  05/22/00 */\n    currVol->errorResDisable = 0;\n    /*PutVopInterlaced(0,curr_vop); not implemented yet */\n    if (currVop->predictionType != I_VOP)\n        currVop->fcodeForward = 1;\n    else\n        currVop->fcodeForward = 0;\n\nreturn_point:\n\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- PV_DecodeVop -- Decodes the VOP information from the bitstream\n*\n*   04/12/2000\n*                   Initial port to the new PV decoder library format.\n*                   This function is different from the one in MoMuSys MPEG-4\n*                   visual decoder.  
We handle combined mode with or without\n*                   error resilience and H.263 mode through the same path now.\n*\n*   05/04/2000\n*                   Added temporal scalability to the decoder.\n*\n***********************************************************CommentEnd********/\nPV_STATUS PV_DecodeVop(VideoDecData *video)\n{\n    Vol *currVol = video->vol[video->currLayer];\n    PV_STATUS status;\n    uint32 tmpvar;\n\n    /*****\n    *   Do scalable or non-scalable decoding of the current VOP\n    *****/\n\n    if (!currVol->scalability)\n    {\n        if (currVol->dataPartitioning)\n        {\n            /* Data partitioning mode comes here */\n            status = DecodeFrameDataPartMode(video);\n        }\n        else\n        {\n            /* Combined mode with or without error resilience */\n            /*    and short video header comes here.          */\n            status = DecodeFrameCombinedMode(video);\n        }\n    }\n    else\n    {\n#ifdef DO_NOT_FOLLOW_STANDARD\n        /* according to the standard, only combined mode is allowed */\n        /*    in the enhancement layer.          06/01/2000.        */\n        if (currVol->dataPartitioning)\n        {\n            /* Data partitioning mode comes here */\n            status = DecodeFrameDataPartMode(video);\n        }\n        else\n        {\n            /* Combined mode with or without error resilience */\n            /*    and short video header comes here.          
*/\n            status = DecodeFrameCombinedMode(video);\n        }\n#else\n        status = DecodeFrameCombinedMode(video);\n#endif\n    }\n\n    /* This part is for consuming Visual_object_sequence_end_code and EOS Code */   /*  10/15/01 */\n    if (!video->shortVideoHeader)\n    {\n        /* at this point bitstream is expected to be byte aligned */\n        BitstreamByteAlignNoForceStuffing(currVol->bitstream);\n\n        status = BitstreamShowBits32HC(currVol->bitstream, &tmpvar);  /*  07/07/01 */\n        if (tmpvar == VISUAL_OBJECT_SEQUENCE_END_CODE)/* VOS_END_CODE */\n        {\n            PV_BitstreamFlushBits(currVol->bitstream, 16);\n            PV_BitstreamFlushBits(currVol->bitstream, 16);\n        }\n\n    }\n    else\n    {\n#ifdef PV_ANNEX_IJKT_SUPPORT\n        if (video->deblocking)\n        {\n            H263_Deblock(video->currVop->yChan, video->width, video->height, video->QPMB, video->headerInfo.Mode, 0, 0);\n            H263_Deblock(video->currVop->uChan, video->width >> 1, video->height >> 1, video->QPMB, video->headerInfo.Mode, 1, video->modified_quant);\n            H263_Deblock(video->currVop->vChan, video->width >> 1, video->height >> 1, video->QPMB, video->headerInfo.Mode, 1, video->modified_quant);\n        }\n#endif\n        /* Read EOS code for shortheader bitstreams    */\n        status = BitstreamShowBits32(currVol->bitstream, 22, &tmpvar);\n        if (tmpvar == SHORT_VIDEO_END_MARKER)\n        {\n            PV_BitstreamFlushBits(currVol->bitstream, 22);\n        }\n        else\n        {\n            status = PV_BitstreamShowBitsByteAlign(currVol->bitstream, 22, &tmpvar);\n            if (tmpvar == SHORT_VIDEO_END_MARKER)\n            {\n                PV_BitstreamByteAlign(currVol->bitstream);\n                PV_BitstreamFlushBits(currVol->bitstream, 22);\n            }\n        }\n    }\n    return status;\n}\n\n\n/***********************************************************CommentBegin******\n*\n* -- CalcVopDisplayTime -- 
calculate absolute time when VOP is to be displayed\n*\n*   04/12/2000 Initial port to the new PV decoder library format.\n*\n***********************************************************CommentEnd********/\nuint32 CalcVopDisplayTime(Vol *currVol, Vop *currVop, int shortVideoHeader)\n{\n    uint32 display_time;\n\n\n    /*****\n    *   Calculate the time when the VOP is to be displayed next\n    *****/\n\n    if (!shortVideoHeader)\n    {\n        display_time = (uint32)(currVol->moduloTimeBase + (((int32)currVop->timeInc - (int32)currVol->timeInc_offset) * 1000) / ((int32)currVol->timeIncrementResolution));  /*  11/12/2001 */\n        if (currVop->timeStamp >= display_time)\n        {\n            display_time += 1000;  /* this case is valid if GOVHeader timestamp is ignored */\n        }\n    }\n    else\n    {\n        display_time = (uint32)(currVol->moduloTimeBase * 33 + (currVol->moduloTimeBase * 11) / 30); /*  11/12/2001 */\n    }\n\n    return(display_time);\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/yuv2rgb.cpp",
    "content": "/*\n * yuv2rgb.cpp\n *\n *  Created on: 29 juil. 2009\n *      Author: rglt1266\n */\n#include <stdio.h>\n#include \"yuv2rgb.h\"\n\nint convert (int width,int height, uint8 *in,uint32 *out){\n\tuint8 *pY;\n\tuint8 *pU;\n\tuint8 *pV;\n\tint Y,U,V;\n\tint i,j;\n\tint R,G,B,Cr,Cb;\n\n\t/* Init */\n\tpY = in;\n\tpU = in + (width*height);\n\tpV = pU + (width*height/4);\n\n\tfor(i=0;i<height;i++){\n\t\tfor(j=0;j<width;j++){\n\t\t\t/* YUV values uint */\n\t\t\tY=*((pY)+ (i*width) + j);\n\t\t\tU=*( pU + (j/2) + ((width/2)*(i/2)));\n\t\t\tV=*( pV + (j/2) + ((width/2)*(i/2)));\n\t\t\t/* RGB values */\n\t\t\tCr = V-128;\n\t\t\tCb = U-128;\n\t\t\tR = Y + ((359*Cr)>>8);\n\t\t\tG = Y - ((88*Cb+183*Cr)>>8);\n\t\t\tB = Y + ((454*Cb)>>8);\n\t\t\tif (R>255)R=255; else if (R<0)R=0;\n\t\t\tif (G>255)G=255; else if (G<0)G=0;\n\t\t\tif (B>255)B=255; else if (B<0)B=0;\n\n\t\t\t/* Write data */\n\t\t\tout[((i*width) + j)]=((((R & 0xFF) << 16) | ((G & 0xFF) << 8) | (B & 0xFF))& 0xFFFFFFFF);\n\t\t}\n\t}\n\treturn 1;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/yuv2rgb.h",
    "content": "/*\n * yuv2rgb.h\n *\n *  Created on: 29 juil. 2009\n *      Author: rglt1266\n */\n\n#include \"oscl_types.h\"\n\n#ifndef YUV2RGB_H_\n#define YUV2RGB_H_\n\nint convert (int width,int height, uint8 *in,uint32 *out);\n\n#endif /* YUV2RGB_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/zigzag.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef zigzag_H\n#define zigzag_H\n\n/*----------------------------------------------------------------------------\n; INCLUDES\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; EXTERNAL VARIABLES REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    extern const int zigzag_inv[3*NCOEFF_BLOCK];\n    /*----------------------------------------------------------------------------\n    ; SIMPLE TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    
/*----------------------------------------------------------------------------\n    ; ENUMERATED TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; STRUCTURES TYPEDEF'S\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; GLOBAL FUNCTION DEFINITIONS\n    ; Function Prototype declaration\n    ----------------------------------------------------------------------------*/\n\n    /*----------------------------------------------------------------------------\n    ; END\n    ----------------------------------------------------------------------------*/\n#endif\n\n#ifdef __cplusplus\n}\n#endif\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/dec/src/zigzag_tab.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#include    \"mp4dec_api.h\"\n#include    \"mp4def.h\"\n#include    \"zigzag.h\"\n/*----------------------------------------------------------------------------\n; MACROS\n; Define module specific macros here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; DEFINES\n; Include all pre-processor statements here. 
Include conditional\n; compile variables also.\n----------------------------------------------------------------------------*/\n\n/*----------------------------------------------------------------------------\n; LOCAL FUNCTION DEFINITIONS\n; Function Prototype declaration\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; LOCAL STORE/BUFFER/POINTER DEFINITIONS\n; Variable declaration - defined here and used outside this module\n----------------------------------------------------------------------------*/\nconst int zigzag_inv[3*NCOEFF_BLOCK] =\n{\n    0,  1,  8, 16,  9,  2,  3, 10,\n    17, 24, 32, 25, 18, 11,  4,  5,\n    12, 19, 26, 33, 40, 48, 41, 34,\n    27, 20, 13,  6,  7, 14, 21, 28,\n    35, 42, 49, 56, 57, 50, 43, 36,\n    29, 22, 15, 23, 30, 37, 44, 51,\n    58, 59, 52, 45, 38, 31, 39, 46,\n    53, 60, 61, 54, 47, 55, 62, 63,\n//};\n\n    /* Vertical inverse zigzag */\n//const static Int zigzag_v_inv[NCOEFF_BLOCK] = {\n    0, 8, 16, 24, 1, 9, 2, 10,\n    17, 25, 32, 40, 48, 56, 57, 49,\n    41, 33, 26, 18, 3, 11, 4, 12,\n    19, 27, 34, 42, 50, 58, 35, 43,\n    51, 59, 20, 28, 5, 13, 6, 14,\n    21, 29, 36, 44, 52, 60, 37, 45,\n    53, 61, 22, 30, 7, 15, 23, 31,\n    38, 46, 54, 62, 39, 47, 55, 63,\n//};\n    /* Horizontal inverse zigzag*/\n//const static Int zizag_h_inv[NCOEFF_BLOCK] = {\n    0, 1, 2, 3, 8, 9, 16, 17,\n    10, 11, 4, 5, 6, 7, 15, 14,\n    13, 12, 19, 18, 24, 25, 32, 33,\n    26, 27, 20, 21, 22, 23, 28, 29,\n    30, 31, 34, 35, 40, 41, 48, 49,\n    42, 43, 36, 37, 38, 39, 44, 45,\n    46, 47, 50, 51, 56, 57, 58, 59,\n    52, 53, 54, 55, 60, 61, 62, 63\n};\n\n/*----------------------------------------------------------------------------\n; EXTERNAL FUNCTION REFERENCES\n; Declare functions defined elsewhere and referenced in this 
module\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; EXTERNAL GLOBAL STORE/BUFFER/POINTER REFERENCES\n; Declare variables used in this module but defined elsewhere\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; FUNCTION CODE\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Define all local variables\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Function body here\n----------------------------------------------------------------------------*/\n\n\n/*----------------------------------------------------------------------------\n; Return nothing or data or data pointer\n----------------------------------------------------------------------------*/\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/Android.mk",
    "content": "#\n# Copyright (C) 2008 The Android Open Source Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n# This makefile supplies the rules for building a library of JNI code for\n# use by our example platform shared library.\n\nLOCAL_PATH:= $(call my-dir)\ninclude $(CLEAR_VARS)\n\nLOCAL_MODULE_TAGS := optional\n\n# This is the target being built.\nLOCAL_MODULE:= libH263Encoder\n\n# All of the source files that we will compile.\nLOCAL_SRC_FILES:= \\\n\tsrc/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.cpp \\\n\tsrc/bitstream_io.cpp \\\n\tsrc/combined_encode.cpp \\\n\tsrc/datapart_encode.cpp \\\n\tsrc/dct.cpp \\\n\tsrc/fastcodemb.cpp \\\n\tsrc/fastidct.cpp \\\n\tsrc/fastquant.cpp \\\n\tsrc/findhalfpel.cpp \\\n\tsrc/me_utils.cpp \\\n\tsrc/motion_comp.cpp \\\n\tsrc/motion_est.cpp \\\n\tsrc/mp4enc_api.cpp \\\n\tsrc/rate_control.cpp \\\n\tsrc/sad.cpp \\\n\tsrc/sad_halfpel.cpp \\\n\tsrc/vlc_encode.cpp \\\n\tsrc/vop.cpp\n\n# All of the shared libraries we link against.\nLOCAL_SHARED_LIBRARIES := \n\n# No static libraries.\nLOCAL_STATIC_LIBRARIES :=\n\n# Also need the JNI headers.\nLOCAL_C_INCLUDES += \\\n\t$(JNI_H_INCLUDE) \\\n\t$(LOCAL_PATH)/src \\\n \t$(LOCAL_PATH)/include \\\n\t$(LOCAL_PATH)/oscl\n\n# No special compiler flags.\nLOCAL_CFLAGS +=\n\n# Don't prelink this library.  
For more efficient code, you may want\n# to add this library to the prelink map and set this to true.\nLOCAL_PRELINK_MODULE := false\n\ninclude $(BUILD_SHARED_LIBRARY)\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/include/cvei.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/*  File: cvei.h                                                                */\n/** @introduction   Common Video Encoder Interface (CVEI) is intended to be used by\n    application developers who want to create a multimedia application with video\n    encoding feature. CVEI is designed such that new video encoder algorithms or\n    modules can be plugged in seamlessly without user interaction. In other words,\n    any changes to the CVEI library are transparent to the users. Users can still\n    use the same set of APIs for new encoding tools.\n\n    @requirement    CVEI will take an input frame in one of several format supported\n    by PV and encode it to an MPEG4 bitstream. It will also return a reconstructed\n    image in YUV 4:2:0 format. Currently the input format supported are YUV 4:2:0,\n    RGB24 and UYVY 4:2:2.\n\n    CVEI is designed such that it is simple to use. It should hides implementation\n    dependency  from the users. In this version, we decided that the operation will\n    be synchronous, i.e., the encoding will be a blocked call. 
Asynchronous operation\n    will be in the level above CVEI, i.e., in Author Engine Video Module which will\n    take care of capturing device as well.\n\n    @brief  The following classes are used to interface with codecs. Their names\n    are CPVxxxVideoEncoder where xxx is codec specific such as MPEG4, H263, H26L,\n    etc. All of them are subclasses of CPVCommonVideoEncoder.\n*/\n/*********************************************************************************/\n\n#ifndef __CVEI_H\n#define __CVEI_H\n\n#include \"oscl_scheduler_ao.h\"\n#include \"oscl_base.h\"\n#include \"mp4enc_api.h\" /* for MP4HintTrack */\n\n#define MAX_LAYER 2\n\n/** General returned values. */\nenum TCVEI_RETVAL\n{\n    ECVEI_SUCCESS,\n    ECVEI_FAIL,\n    ECVEI_FLUSH,\n    ECVEI_MORE_OUTPUT\n} ;\n\n/** Returned events with the callback function. */\nenum TCVEI_EVENT\n{\n    /** Called when a packet or a frame of output bitstream is ready. */\n    ECVEI_BUFFER_READY,\n\n    /** Called when the last packet of a frame of output bitstream is ready. */\n    ECVEI_FRAME_DONE,\n\n    /** Called when no buffers is available for output bitstream. A buffer can be added thru AddBuffer API. */\n    ECVEI_NO_BUFFERS,\n\n    /** Called when there is an error with the encoding operation. */\n    ECVEI_ERROR\n};\n\n/** Contains supported input format */\nenum TPVVideoFormat\n{\n    ECVEI_RGB24,\n    ECVEI_RGB12,\n    ECVEI_YUV420,\n    ECVEI_UYVY,\n    ECVEI_YUV420SEMIPLANAR\n};\n\n/** Type of contents for optimal encoding mode. */\nenum TPVContentType\n{\n    /** Content is to be streamed in real-time. */\n    ECVEI_STREAMING,\n\n    /** Content is to be downloaded and playbacked later.*/\n    ECVEI_DOWNLOAD,\n\n    /** Content is to be 3gpp baseline compliant. */\n    ECVEI_H263\n};\n\n/** Rate control type. */\nenum TMP4RateControlType\n{\n    /** Constant quality, variable bit rate, fixed quantization level. */\n    ECONSTANT_Q,\n\n    /** Short-term constant bit rate control. 
*/\n    ECBR_1,\n\n    /** Long-term constant bit rate control. */\n    EVBR_1\n};\n\n/** Targeted profile and level to encode. */\nenum TPVM4VProfileLevel\n{\n    /* Non-scalable profile */\n    ECVEI_SIMPLE_LEVEL0 = 0,\n    ECVEI_SIMPLE_LEVEL1,\n    ECVEI_SIMPLE_LEVEL2,\n    ECVEI_SIMPLE_LEVEL3,\n    ECVEI_SIMPLE_LEVEL4A,\n    ECVEI_SIMPLE_LEVEL5,\n    ECVEI_CORE_LEVEL1,\n    ECVEI_CORE_LEVEL2,\n\n    /* Scalable profile */\n    ECVEI_SIMPLE_SCALABLE_LEVEL0,\n    ECVEI_SIMPLE_SCALABLE_LEVEL1,\n    ECVEI_SIMPLE_SCALABLE_LEVEL2,\n    ECVEI_CORE_SCALABLE_LEVEL1,\n    ECVEI_CORE_SCALABLE_LEVEL2,\n    ECVEI_CORE_SCALABLE_LEVEL3\n};\n\n/** This structure contains encoder settings. */\nstruct TPVVideoEncodeParam\n{\n    /** Specifies an  ID that will be used to specify this encoder while returning\n    the bitstream in asynchronous mode. */\n    uint32              iEncodeID;\n\n    /** Specifies whether base only (iNumLayer = 1) or base + enhancement layer\n    (iNumLayer =2 ) is to be used. */\n    int32               iNumLayer;\n\n    /** Specifies the width in pixels of the encoded frames. IFrameWidth[0] is for\n    base layer and iFrameWidth[1] is for enhanced layer. */\n    int                 iFrameWidth[MAX_LAYER];\n\n    /** Specifies the height in pixels of the encoded frames. IFrameHeight[0] is for\n    base layer and iFrameHeight[1] is for enhanced layer. */\n    int                 iFrameHeight[MAX_LAYER];\n\n    /** Specifies the cumulative bit rate in bit per second. IBitRate[0] is for base\n    layer and iBitRate[1] is for base+enhanced layer.*/\n    int                 iBitRate[MAX_LAYER];\n\n    /** Specifies the cumulative frame rate in frame per second. IFrameRate[0] is for\n    base layer and iFrameRate[1] is for base+enhanced layer. */\n    float               iFrameRate[MAX_LAYER];\n\n    /** Specifies the picture quality factor on the scale of 1 to 10. It trades off\n    the picture quality with the frame rate. 
Higher frame quality means lower frame rate.\n    Lower frame quality for higher frame rate.*/\n    int32               iFrameQuality;\n\n    /** Enable the use of iFrameQuality to determine the frame rate. If it is false,\n    the encoder will try to meet the specified frame rate regardless of the frame quality.*/\n    bool                iEnableFrameQuality;\n\n    /** Specifies the maximum number of P-frames between 2 INTRA frames. An INTRA mode is\n    forced to a frame once this interval is reached. When there is only one I-frame is present\n    at the beginning of the clip, iIFrameInterval should be set to -1. */\n    int32               iIFrameInterval;\n\n    /** According to iIFrameInterval setting, the minimum number of intra MB per frame is\n    optimally calculated for error resiliency. However, when iIFrameInterval is set to -1,\n    iNumIntraMBRefresh must be specified to guarantee the minimum number of intra\n    macroblocks per frame.*/\n    uint32              iNumIntraMBRefresh;\n\n    /** Specifies the VBV buffer size which determines the end-to-end delay between the\n    encoder and the decoder.  The size is in unit of seconds. For download application,\n    the buffer size can be larger than the streaming application. For 2-way application,\n    this buffer shall be kept minimal. For a special case, in VBR mode, iBufferDelay will\n    be set to -1 to allow buffer underflow. */\n    float               iBufferDelay;\n\n    /** Specifies the type of the access whether it is streaming, CVEI_STREAMING\n    (data partitioning mode) or download, CVEI_DOWNLOAD (combined mode).*/\n    TPVContentType      iContentType;\n\n    /** Specifies the rate control algorithm among one of the following constant Q,\n    CBR and VBR.  The structure TMP4RateControlType is defined below.*/\n    TMP4RateControlType iRateControlType;\n\n    /** Specifies high quality but also high complexity mode for rate control. 
*/\n    bool                iRDOptimal;\n\n    /** Specifies the initial quantization parameter for the first I-frame. If constant Q\n    rate control is used, this QP will be used for all the I-frames. This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iIquant[2];\n\n    /** Specifies the initial quantization parameter for the first P-frame. If constant Q\n    rate control is used, this QP will be used for all the P-frames. This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iPquant[2];\n\n    /** Specifies the initial quantization parameter for the first B-frame. If constant Q\n    rate control is used, this QP will be used for all the B-frames. This number must be\n    set between 1 and 31, otherwise, Initialize() will fail. */\n    int                 iBquant[2];\n\n    /** Specifies the search range in pixel unit for motion vector. The range of the\n    motion vector will be of dimension [-iSearchRange.5, +iSearchRange.0]. */\n    int32               iSearchRange;\n\n    /** Specifies the use of 8x8 motion vectors. */\n    bool                iMV8x8;\n\n    /** Specifies the use of half-pel motion vectors. */\n    bool                iMVHalfPel;\n\n    /** Specifies automatic scene detection where I-frame will be used the the first frame\n    in a new scene. */\n    bool                iSceneDetection;\n\n    /** Specifies the packet size in bytes which represents the number of bytes between two resync markers.\n    For ECVEI_DOWNLOAD and ECVEI_H263, if iPacketSize is set to 0, there will be no resync markers in the bitstream.\n    For ECVEI_STREAMING is parameter must be set to a value greater than 0.*/\n    uint32              iPacketSize;\n\n    /** Specifies whether the current frame skipping decision is allowed after encoding\n    the current frame. 
If there is no memory of what has been coded for the current frame,\n    iNoCurrentSkip has to be on. */\n    bool                iNoCurrentSkip;\n\n    /** Specifies that no frame skipping is allowed. Frame skipping is a tool used to\n    control the average number of bits spent to meet the target bit rate. */\n    bool                iNoFrameSkip;\n\n    /** Specifies the duration of the clip in millisecond.*/\n    int32               iClipDuration;\n\n    /** Specifies the profile and level used to encode the bitstream. When present,\n    other settings will be checked against the range allowable by this target profile\n    and level. Fail may be returned from the Initialize call. */\n    TPVM4VProfileLevel  iProfileLevel;\n\n    /** Specifies FSI Buffer input */\n    uint8*              iFSIBuff;\n\n    /** Specifies FSI Buffer Length */\n    int             iFSIBuffLength;\n\n\n};\n\n\n/** Structure for input format information */\nstruct TPVVideoInputFormat\n{\n    /** Contains the width in pixels of the input frame. */\n    int32           iFrameWidth;\n\n    /** Contains the height in pixels of the input frame. */\n    int32           iFrameHeight;\n\n    /** Contains the input frame rate in the unit of frame per second. */\n    float           iFrameRate;\n\n    /** Contains Frame Orientation. Used for RGB input. 1 means Bottom_UP RGB, 0 means Top_Down RGB, -1 for video formats other than RGB*/\n    int             iFrameOrientation;\n\n    /** Contains the format of the input video, e.g., YUV 4:2:0, UYVY, RGB24, etc. */\n    TPVVideoFormat  iVideoFormat;\n};\n\n\n/** Contains the input data information */\nstruct TPVVideoInputData\n{\n    /** Pointer to an input frame buffer in input source format.*/\n    uint8       *iSource;\n\n    /** The corresponding time stamp of the input frame. 
*/\n    uint32      iTimeStamp;\n};\n\n/** Contains the output data information */\nstruct TPVVideoOutputData\n{\n    /** Pointer to the reconstructed frame buffer in YUV 4:2:0 domain. */\n    uint8           *iFrame;\n\n    /** The number of layer encoded, 0 for base, 1 for enhanced. */\n    int32           iLayerNumber;\n\n    /** Pointer to the encoded bitstream buffer. */\n    uint8           *iBitStream;\n\n    /** The size in bytes of iBStream. */\n    int32           iBitStreamSize;\n\n    /** The time stamp of the encoded frame according to the bitstream. */\n    uint32          iVideoTimeStamp;\n\n    /** The time stamp of the encoded frame as given before the encoding. */\n    uint32          iExternalTimeStamp;\n\n    /** The hint track information. */\n    MP4HintTrack    iHintTrack;\n};\n\n/** An observer class for callbacks to report the status of the CVEI */\nclass MPVCVEIObserver\n{\n    public:\n        /** The callback funtion with aEvent being one of TCVEIEvent enumeration. */\n        virtual void HandlePVCVEIEvent\n        (uint32 aId, uint32 aEvent, uint32 aParam1 = 0) = 0;\n        virtual ~MPVCVEIObserver() {}\n};\n\n/** This class is the base class for codec specific interface class.\nThe users must maintain an instance of the codec specific class throughout\nthe encoding session.\n*/\nclass CommonVideoEncoder : public OsclTimerObject\n{\n    public:\n        /** Constructor for CVEI class. */\n        CommonVideoEncoder() : OsclTimerObject(OsclActiveObject::EPriorityNominal, \"PVEncoder\") {};\n\n        /** Initialization function to set the input video format and the\n        encoding parameters. This function returns CVEI_ERROR if there is\n        any errors. Otherwise, the function returns CVEI_SUCCESS.*/\n        virtual  TCVEI_RETVAL Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam) = 0;\n\n        /** Set the observer for asynchronous encoding mode. 
*/\n        virtual  TCVEI_RETVAL SetObserver(MPVCVEIObserver *aObserver) = 0;\n\n        /** Add a buffer to the queue of output buffers for output bitstream in\n        asynchronous encoding mode. */\n        virtual  TCVEI_RETVAL AddBuffer(TPVVideoOutputData *aVidOut) = 0;\n\n        /** This function sends in an input video data structure containing a source\n        frame and the associated timestamp. The encoded bitstream will be returned by\n        observer callback.\n        The above 3 APIs only replace EncodeFrame() API. Other APIs such as initialization\n        and update parameters remain the same. */\n        virtual  TCVEI_RETVAL Encode(TPVVideoInputData *aVidIn) = 0;\n\n        /** This function returns the maximum VBV buffer size such that the\n            application can allocate a buffer that guarantees to fit one frame.*/\n        virtual  int32 GetBufferSize() = 0;\n\n        /** This function returns the VOL header part (starting from the VOS header)\n        of the encoded bitstream. This function must be called after Initialize.\n        The output is written to the memory (volHeader) allocated by the users.*/\n        virtual  TCVEI_RETVAL GetVolHeader(uint8 *volHeader, int32 *size, int32 layer) = 0;\n\n        /** This function sends in an input video data structure containing a source\n        frame and the associated timestamp. It returns an output video data structure\n        containing coded bit stream, reconstructed frame in YUV 4:2:0 (can be changed\n        to source format) and the timestamp associated with the coded frame.\n        The input timestamp may not correspond to the output timestamp. User can send\n        an input structure in without getting any encoded data back or getting an encoded\n        frame in the past. 
This function returns ECVEI_ERROR if there is any errors.\n        Otherwise, the function returns ECVEI_SUCCESS.\n        In case of Overrun Buffer usage, it is possible that return value is ECVEI_MORE_OUTPUT\n        which indicates that frame cannot fit in the current buffer*/\n        virtual  TCVEI_RETVAL EncodeFrame(TPVVideoInputData  *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes\n#ifdef PVAUTHOR_PROFILING\n                                          , void *aParam1 = 0\n#endif\n                                         ) = 0;\n\n        /** Before the termination of the encoding process, the users have to query\n        whether there are any encoded frame pending inside the CVEI. The returned value\n        will indicate whether there are more frames to be flushed (ECVEI_FLUSH).\n        FlushOutput has to be called until there are no more frames, i.e., it returns\n        ECVEI_SUCCESS. This function may be called during the encoding operation if\n        there is no input frame and the application does not want to waste the time\n        waiting for input frame. It can call this function to flush encoded frame\n        out of the memory. */\n        virtual  TCVEI_RETVAL FlushOutput(TPVVideoOutputData *aVidOut) = 0;\n\n        /** This function cleanup the CVEI allocated resources. */\n        virtual  TCVEI_RETVAL Terminate() = 0;\n\n        /**This function dynamically changes the target bit rate of the encoder\n        while encoding. aBitRate[n] is the new accumulate target bit rate of layer n.\n        Successful update is returned with ECVEI_SUCCESS.*/\n        virtual  TCVEI_RETVAL UpdateBitRate(int32 aNumLayer, int32 *aBitRate) = 0;\n\n        /** This function dynamically changes the target frame rate of the encoder\n        while encoding. aFrameRate[n] is the new accumulate target frame rate of\n        layer n. Successful update is returned with ECVEI_SUCCESS. 
*/\n        virtual  TCVEI_RETVAL UpdateFrameRate(int32 aNumLayer, float *aFrameRate) = 0;\n\n        /** This function dynamically changes the I-Vop update interval while\n        encoding to a new value, aIFrameInterval. */\n        virtual  TCVEI_RETVAL UpdateIFrameInterval(int32 aIFrameInterval) = 0;\n\n        /** This function forces an I-Vop mode to the next frame to be encoded. */\n        virtual  TCVEI_RETVAL IFrameRequest() = 0;\n\n        /** This function returns the input width of a specific layer\n        (not necessarily multiple of 16). */\n        virtual  int32 GetEncodeWidth(int32 aLayer) = 0;\n\n        /** This function returns the input height of a specific layer\n        (not necessarily multiple of 16). */\n        virtual  int32 GetEncodeHeight(int32 aLayer) = 0;\n\n        /** This function returns the target encoded frame rate of a specific layer. */\n        virtual  float GetEncodeFrameRate(int32 aLayer) = 0;\n    protected:\n        virtual void Run(void) = 0;\n        virtual void DoCancel(void) = 0;\n        /* internal enum */\n        enum TCVEIState\n        {\n            EIdle,\n            EEncode\n        };\n\n        TCVEIState  iState;\n        uint32      iId;\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/include/mp4enc_api.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _MP4ENC_API_H_\n#define _MP4ENC_API_H_\n\n#ifndef OSCL_BASE_H_INCLUDED\n#include \"oscl_base.h\"\n#endif\n\n#ifndef _PV_TYPES_\n#define _PV_TYPES_\ntypedef unsigned char UChar;\ntypedef char Char;\ntypedef unsigned int UInt;\ntypedef int Int;\ntypedef unsigned short UShort;\ntypedef short Short;\ntypedef unsigned int Bool;\ntypedef unsigned long ULong;\n\n#define PV_CODEC_INIT  0\n#define PV_CODEC_STOP  1\n#endif\n\n#define PV_TRUE  1\n#define PV_FALSE 0\n\ntypedef enum\n{\n    SHORT_HEADER,\n    SHORT_HEADER_WITH_ERR_RES,\n    H263_MODE,\n    H263_MODE_WITH_ERR_RES,\n    DATA_PARTITIONING_MODE,\n    COMBINE_MODE_NO_ERR_RES,\n    COMBINE_MODE_WITH_ERR_RES\n\n} MP4EncodingMode;\n\ntypedef enum\n{\n    CONSTANT_Q,\n    CBR_1,\n    VBR_1,\n    CBR_2,\n    VBR_2,\n    CBR_LOWDELAY\n} MP4RateControlType;\n\ntypedef enum\n{\n    PASS1,\n    PASS2\n} PassNum;\n\ntypedef enum\n{\n    PV_OFF,\n    PV_ON\n} ParamEncMode;\n\n\n/* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2, CPL2, CPL2} , SPL0: Simple Profile@Level0 , CPL1: Core Profile@Level1 */\n/* {SSPL0, SSPL1, SSPL2, SSPL2, CSPL1, CSPL2, CSPL3, CSPL3} , SSPL0: Simple Scalable Profile@Level0, CPL1: Core Scalable Profile@Level1 */\n\ntypedef 
enum\n{\n    /* Non-scalable profile */\n    SIMPLE_PROFILE_LEVEL0 = 0,\n    SIMPLE_PROFILE_LEVEL1,\n    SIMPLE_PROFILE_LEVEL2,\n    SIMPLE_PROFILE_LEVEL3,\n    SIMPLE_PROFILE_LEVEL4A,\n    SIMPLE_PROFILE_LEVEL5,\n    CORE_PROFILE_LEVEL1,\n    CORE_PROFILE_LEVEL2,\n    MAX_BASE_PROFILE = CORE_PROFILE_LEVEL2,\n\n    /* Scalable profile */\n    SIMPLE_SCALABLE_PROFILE_LEVEL0 = MAX_BASE_PROFILE + 1,\n    SIMPLE_SCALABLE_PROFILE_LEVEL1,\n    SIMPLE_SCALABLE_PROFILE_LEVEL2,\n    CORE_SCALABLE_PROFILE_LEVEL1,\n    CORE_SCALABLE_PROFILE_LEVEL2,\n    CORE_SCALABLE_PROFILE_LEVEL3,\n    MAX_SCALABLE_PROFILE = CORE_SCALABLE_PROFILE_LEVEL3\n\n} ProfileLevelType;\n\n\ntypedef struct tagMP4HintTrack\n{\n    UChar   MTB;\n    UChar   LayerID;\n    UChar   CodeType;\n    UChar   RefSelCode;\n} MP4HintTrack;\n\ntypedef struct tagvideoEncControls\n{\n    void            *videoEncoderData;\n    Int             videoEncoderInit;\n} VideoEncControls;\n\n\ntypedef struct tagvideoEncFrameIO\n{\n    UChar   *yChan; /* pointer to Y */\n    UChar   *uChan; /* pointer to U */\n    UChar   *vChan; /* pointer to V */\n    Int     height; /* height for Y */\n    Int     pitch;  /* stride  for Y */\n    ULong   timestamp; /* modulo timestamp in millisecond*/\n\n}   VideoEncFrameIO  ;\n\n/**\n@brief  Encoding options structure */\ntypedef struct tagvideoEncOptions\n{\n    /** @brief Sets the encoding mode, defined by the above enumaration. If there are conflicts between the encoding mode\n    *   and subsequent encoding options, encoding mode take precedent over encoding options. */\n    MP4EncodingMode     encMode;\n\n    /** @brief Sets the number of bytes per packet, only used in DATA_PARTITIONING_MODE or COMBINE_MODE_WITH_ERR_RES mode.\n    *           The resync marker will be inserted as often as the size of the packet.*/\n    Int                 packetSize;\n\n    /** @brief Selects MPEG-4/H.263 profile and level, if specified other encoding options must conform with it. 
*/\n    ProfileLevelType    profile_level;\n\n    /** @brief Enables reversible variable length code (RVLC) mode. Normally it is set to PV_OFF.*/\n    ParamEncMode        rvlcEnable;\n\n    /** @brief Set the frequency of GOB header interval */\n    Int                 gobHeaderInterval;\n\n    /** @brief Sets the number of bitstream layers: 1 is base only: 2 is base + enhancement */\n    Int                 numLayers;\n\n    /** @brief Sets the number of ticks per second used for timing information encoded in MPEG4 bitstream.*/\n    Int                 timeIncRes;\n\n    /** @brief Sets the number of ticks in time increment resolution between 2 source frames (equivalent to source frame rate). */\n    Int                 tickPerSrc;\n\n    /** @brief Specifies encoded heights in pixels, height[n] represents the n-th layer's height. */\n    Int                 encHeight[2];\n\n    /** @brief Specifies encoded widths in pixels, width[n] represents the n-th layer's width.*/\n    Int                 encWidth[2];\n\n    /** @brief Specifies target frame rates in frames per second, frameRate[n] represents the n-th layer's target frame rate.*/\n    float               encFrameRate[2];\n\n    /** @brief Specifies target bit rates in bits per second unit, bitRate[n] represents the n-th layer's target bit rate. */\n    Int                 bitRate[2];\n\n    /** @brief Specifies default quantization parameters for I-Vop. Iquant[n] represents the n-th layer default quantization parameter. The default is Iquant[0]=12.*/\n    Int                 iQuant[2];\n\n    /** @brief Specifies default quantization parameters for P-Vop. Pquant[n] represents the n-th layer default quantization parameter. 
The default is Pquant[0]=10.*/\n    Int                 pQuant[2];\n\n    /** @brief  specifies quantization mode (H263 mode or MPEG mode) of the encoded base and enhance layer (if any).\n    *           In Simple and Simple Scalable profile, we use only H263 mode.*/\n    Int                 quantType[2];\n\n    /** @brief Sets rate control algorithm, one of (CONSTANT_Q, CBR_1, or VBR_1).\n    *           CONSTANT_Q uses the default quantization values to encode the sequence.\n    *           CBR_1 (constant bit rate) controls the output at a desired bit rate\n    *           VBR_1 (variable bit rate) gives better picture quality at the expense of bit rate fluctuation\n    *           Note:   type=CONSTANT_Q produces sequences with arbitrary bit rate.\n    *                   type=CBR_1 produces sequences suitable for streaming.\n    *                   type=VBR_1 produces sequences suitable for download. */\n    MP4RateControlType  rcType;\n\n    /** @brief  Sets the VBV buffer size (in the unit of second delay) used to prevent buffer overflow and underflow\n    *           on the decoder side. This function is redundant to PVSetVBVSize. Either one of them is used at a time. */\n    float               vbvDelay;\n\n    /** @brief  Specifies whether frame skipping is permitted or not. When rate control type is set to CONSTANT_Q\n    *           frame skipping is automatically banned.  In CBR_1 and VBR_1 rate control, frame skipping is allowed by default.\n    *           However, users can force no frame skipping with this flag, but buffer constraint may be violated.*/\n    ParamEncMode        noFrameSkipped;\n\n    /** @brief Sets the maximum number of P-frames between two I-frames. 
I-frame mode is periodically forced\n    *           if no I-frame is encoded after the specified period to add error resiliency and help resynchronize in case of errors.\n    *           If scene change detection can add additional I-frame if new scenes are detected.\n    *           intraPeriod is the I frame interval in terms of second.\n    *           intraPeriod =0 indicates I-frame encoding only;\n    *           intraPeriod = -1  indicates I-frame followed by all P-frames; (default)\n    *           intraPeriod = N, indicates the number of P-frames between 2 I-frames.*/\n    Int                 intraPeriod;\n\n\n    /** @brief  Specifies the number Intra MBs to be refreshed in a P-frame. */\n    Int                 numIntraMB;\n\n    /**\n    *   @brief  Specifies whether the scene change detection (SCD) is enabled or disabled.\n    *           With SCD enable, when a new scene is detected, I-Vop mode will be used for the first frame of\n    *           the new scene resulting in better picture quality. An insertion of an I-VOP resets the intraPeriod\n    *           specified by the IntraPeriodAPI().*/\n    ParamEncMode        sceneDetect;\n\n    /** @brief  Specifies the search range of motion estimation search.  Larger value implies\n    *           larger search range, better motion vector match, but more complexity.\n    *           If searchRange=n, the motion vector search is in the range of [-n,n-1] pixels.\n    *           If half-pel  mode is on, the range is [-n, (n-1)+1/2] pixels. The default value is 16.*/\n    Int                 searchRange;\n\n    /** @brief  Turns on/off 8x8 block motion estimation and compensation.\n    *           If on, four motion vectors may be used for motion estimation and compensation of a macroblock,\n    *           otherwise one motion vector per macroblock is used. When the 8x8 MV is off, the total encoding complexity\n    *           is less but the image quality is also worse. 
Therefore, it can be used in complexity limited environment.*/\n    ParamEncMode        mv8x8Enable;\n\n\n    /** @brief Set the threshold for using intra DC VLC.\n    *           Value must range from 0-7.*/\n    Int                 intraDCVlcTh;\n\n    /** @brief This flag turns on the use of AC prediction */\n    Bool                useACPred;\n\n} VideoEncOptions;\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n\n    /* API's */\n    /* Always start with this one !!*/\n    /**\n    *   @brief  Gets default encoding options. This way users only have to set relevant encoding options and leave the one\n    *           they are unsure of.\n    *   @encOption  Pointer to VideoEncOption structure.\n    *   @encUseCase This value determines the set of default encoding options, for example, different encoding options\n    *            are assigned to streaming use-case as compared to download use-case. It can be project dependent too.\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase);\n\n    /**\n    *   @brief  Verifies the consistency of encoding parameters, allocates memory needed and set necessary internal variables.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVInitVideoEncoder(VideoEncControls *encCtrl, VideoEncOptions *encOption);\n\n    /* acquiring encoder info APIs */\n    /**\n    *   @brief  This function returns VOL header. It has to be called before the frame is encoded.  If so,\n    *           then the VOL Header is passed back to the application. 
Then all frames that are encoded do not contain the VOL Header.\n    *           If you do not call the API then the VOL Header is passed within the first frame that is encoded.\n    *           The behavior is unknown if it is called after the first frame is encoded. It is mainly used for MP4 file format authoring.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs.\n    *   @param  volHeader is the Buffer for VOL header.\n    *   @param  size is the size of VOL header in bytes.\n    *   @param  layer is the layer of the requested VOL header.\n    *   @return true for correct operation; false if error happens.\n    */\n    OSCL_IMPORT_REF Bool    PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer);\n\n    /**\n    *   @brief  This function returns the profile and level in H.263 coding when the encoding parameters are set\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs.\n    *   @param  profileID is the pointer of the profile ID. 
Right now we only support profile 0\n    *   @param  levelID is the pointer of the level ID that could be 10-70.\n    *   @return true for correct operation; false if error happens.\n    */\n    OSCL_IMPORT_REF Bool    PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID);\n\n    /**\n    *   @brief  This function returns the profile and level of MPEG4 when the encoding parameters are set\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs.\n    *   @param  profile_level is the pointer of the profile enumeration\n    *   @param  nLayer is the index of the layer of interest\n    *   @return true for correct operation; false if error happens.\n    */\n    OSCL_IMPORT_REF Bool    PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer);\n\n    /**\n    *   @brief  This function returns maximum frame size in bytes\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  maxVideoFrameSize is the pointer of the maximum frame size\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize);\n\n#ifndef LIMITED_API\n    /**\n    *   @brief  This function returns the total amount of memory (in bytes) allocated by the encoder library.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Int     PVGetEncMemoryUsage(VideoEncControls *encCtrl);\n\n    /**\n    *   @brief  This function is used by PVAuthor to get the size of the VBV buffer.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  VBVSize is the pointer of The size of the VBV buffer in bytes.\n    *   @return true for correct 
operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize);\n#endif\n\n    /**\n    *   @brief  This function encodes a frame in YUV 4:2:0 format from the *video_in input frame and put the result in YUV\n    *           for reconstructed frame and bstream for MPEG4 bitstream. The application is required to allocate memory for\n    *           bitstream buffer. The size of the input bitstream memory and the returned output buffer are specified in the\n    *           size field. The encoded layer is specified by the nLayer field. If the current frame is not encoded, size=0 and nLayer=-1.\n    *           Note: If the allocated buffer size is too small to fit a bitstream of a frame, then those extra bits will be left out\n    *                 which can cause syntactic error at the decoder side.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data\n    *   @param  vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding\n    *   @param  nextModTime is the timestamp encoder expects from the next input\n    *   @param  bstream is the pointer to MPEG4 bitstream buffer\n    *   @param  size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output).\n    *   @param  nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. The value -1 indicates skipped frame due to buffer overflow.\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,\n            ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer);\n\n\n    /**\n    *   @brief  This function is used to query overrun buffer. 
It is used when PVEncodeVideoFrame() returns size that is\n    *           larger than the input size.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @return Pointer to the overrun buffer. NULL if overrun buffer is not used.\n    */\n    OSCL_IMPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl);\n\n#ifndef NO_SLICE_ENCODE   /* This set of APIs are not working. This functionality has been partially \n    replaced by the introduction of overrun buffer. */\n\n    /* slice-based coding */\n    /**\n    *   @brief  This function sets the input YUV frame and timestamp to be encoded by the slice-based encoding function PVEncodeSlice().\n    *           It also returns the memory address the reconstructed frame will be copied to (in advance) and the coded layer number.\n    *           The encoder library processes the timestamp and determines if this frame is to be encoded or not. If the current frame\n    *           is not encoded, nLayer=-1. For frame-based motion estimation, the motion estimation of the entire frame is also performed\n    *           in this function. For MB-based motion estimation, the motion vector is searched while coding each MB in PVEncodeSlice().\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  vid_in is the pointer to VideoEncFrameIO structure containing the YUV input data\n    *   @param  nextModTime is the timestamp encoder expects from the next input if this input is rejected and nLayer is set to -1.\n    *   @param  nLayer is the layer of the encoded frame either 0 for base or 1 for enhancement layer. 
The value -1 indicates skipped frame due to buffer overflow.\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer);\n    /**\n    *   @brief  This function encodes a GOB (short header mode) or a packet (data partitioning mode or combined mode with resync marker)\n    *           and outputs the reconstructed frame and MPEG4 bitstream. The application is required to allocate memory for the bitstream buffer.\n    *           The size of the input bitstream memory and the returned output buffer are specified in the size field.  If the buffer size is\n    *           smaller than the requested packet size, user has to call PVEncodeSlice again to get the rest of that pending packet before moving\n    *           on to the next packet. For the combined mode without resync marker, the function returns when the buffer is full.\n    *           The end-of-frame flag  indicates the completion of the frame encoding.  Next frame must be sent in with PVEncodeFrameSet().\n    *           At the end-of-frame, the next video input address and the next video modulo timestamp will be set.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  bstream is the pointer to MPEG4 bitstream buffer.\n    *   @param  size is the size of bitstream buffer allocated (input) and size of the encoded bitstream (output).\n    *   @param  endofFrame is a flag indicating the end-of-frame, '1'. Otherwise, '0'.  
When PVSetNoCurrentFrameSkip is OFF,\n    *           end-of-frame '-1' indicates current frame bitstream must be disregarded.\n    *   @param  vid_out is the pointer to VideoEncFrameIO structure containing the reconstructed YUV output data after encoding\n    *   @param  nextModTime is the timestamp encoder expects from the next input\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size,\n                                          Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime);\n#endif\n\n    /**\n    *   @brief  This function returns MP4 file format hint track information.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  info is the structure for MP4 hint track information\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info);\n\n#ifndef LIMITED_API\n    /**\n    *   @brief  updates target frame rates of the encoded base and enhance layer (if any) while encoding operation is ongoing.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  frameRate is the pointers to array of target frame rates in frames per second,\n    *           frameRate[n] represents the n-th layer's target frame rate.\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate); /* for 2-way */\n\n\n    /**\n    *   @brief  updates target bit rates of the encoded base and enhance layer (if any) while encoding operation is ongoing.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  bitRate is the pointers to array of target bit rates in 
bits per second unit,\n    *           bitRate[n] represents the n-th layer's target bit rate.\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate);           /* for 2-way */\n\n\n    /**\n    *   @brief  updates the INTRA frame refresh interval while encoding operation is ongoing.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  aIFramePeriod is a new value of INTRA frame interval in the unit of number of coded frames.\n    *   @return true for correct operation; false if error happens\n    */\n\n    OSCL_IMPORT_REF Bool    PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod);/* for 2-way */\n\n    /**\n    *   @brief  specifies the number Intra MBs to be refreshed\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @param  numMB is the number of Intra MBs to be refreshed\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB);  /* for 2-way */\n\n    /**\n    *   @brief  This function is called whenever users want the next base frame to be encoded as an I-Vop.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    PVIFrameRequest(VideoEncControls *encCtrl);                         /* for 2-way */\n\n#endif // LIMITED_API\n\n    /* finishing encoder */\n    /**\n    *   @brief  This function frees up all the memory allocated by the encoder library.\n    *   @param  encCtrl is video encoder control structure that is always passed as input in all APIs\n    *   @return true for correct operation; false if error happens\n    */\n    OSCL_IMPORT_REF Bool    
PVCleanUpVideoEncoder(VideoEncControls *encCtrl);\n\n#ifdef __cplusplus\n}\n#endif\n#endif /* _MP4ENC_API_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/include/pvm4vencoder.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*  File: pvm4vencoder.h                                                        */\n/** This file contains MP4 encoder related classes, structures and enumerations. */\n\n#ifndef __PVM4VENCODER_H\n#define __PVM4VENCODER_H\n\n#include \"cvei.h\"\n#include \"mp4enc_api.h\"\n#include \"ccrgb24toyuv420.h\"\n#include \"ccrgb12toyuv420.h\"\n#include \"ccyuv420semitoyuv420.h\"\n\n#define KCVEIMaxOutputBuffer    10\n#define VISUAL_OBJECT_SEQUENCE_START_CODE   0x01B0\n#define VISUAL_OBJECT_SEQUENCE_END_CODE     0x01B1\n#define VISUAL_OBJECT_START_CODE   0x01B5\n#define VO_START_CODE           0x8\n#define VO_HEADER_LENGTH        32\n#define VOL_START_CODE 0x12\n#define VOL_START_CODE_LENGTH 28\n\n#define GROUP_START_CODE    0x01B3\n#define GROUP_START_CODE_LENGTH  32\n\n#define VOP_ID_CODE_LENGTH      5\n#define VOP_TEMP_REF_CODE_LENGTH    16\n\n#define USER_DATA_START_CODE        0x01B2\n#define USER_DATA_START_CODE_LENGTH 32\n\n#define SHORT_VIDEO_START_MARKER        0x20\n#define SHORT_VIDEO_START_MARKER_LENGTH  22\n\n\n/** Encoding mode specific to MPEG4. */\nenum TMP4EncodingMode\n{\n    /** H263 mode. */\n    EH263_MODE,\n\n    /** Data partitioning mode, packet size must be specified. 
*/\n    EDATA_PARTITIONG_MODE,\n\n    /** Combined mode without resync markers. */\n    ECOMBINING_MODE_NO_ERR_RES,\n\n    /** Combined mode with resync markers, packet size must be specified. */\n    ECOMBINING_MODE_WITH_ERR_RES\n};\n\n/** Generic ON/OFF. */\nenum TParamEncMode\n{\n    EPV_OFF,\n    EPV_ON\n};\n\ntypedef struct\n{\n    uint8 *data;\n    uint32 numBytes;\n    uint32 bytePos;\n    uint32 bitBuf;\n    uint32 dataBitPos;\n    uint32  bitPos;\n} mp4StreamType;\n\nstatic const uint32 MASK[33] =\n{\n    0x00000000, 0x00000001, 0x00000003, 0x00000007,\n    0x0000000f, 0x0000001f, 0x0000003f, 0x0000007f,\n    0x000000ff, 0x000001ff, 0x000003ff, 0x000007ff,\n    0x00000fff, 0x00001fff, 0x00003fff, 0x00007fff,\n    0x0000ffff, 0x0001ffff, 0x0003ffff, 0x0007ffff,\n    0x000fffff, 0x001fffff, 0x003fffff, 0x007fffff,\n    0x00ffffff, 0x01ffffff, 0x03ffffff, 0x07ffffff,\n    0x0fffffff, 0x1fffffff, 0x3fffffff, 0x7fffffff,\n    0xffffffff\n};\n\n/** MPEG4 encoder class interface. See CommonVideoEncoder APIs for\nvirtual functions definitions. 
*/\nclass CPVM4VEncoder : public CommonVideoEncoder\n{\n\n    public:\n        OSCL_IMPORT_REF static CPVM4VEncoder* New(int32 aThreadId);\n        OSCL_IMPORT_REF ~CPVM4VEncoder();\n\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL SetObserver(MPVCVEIObserver *aObserver);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL AddBuffer(TPVVideoOutputData *aVidOut);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL Encode(TPVVideoInputData *aVidIn);\n\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam);\n        OSCL_IMPORT_REF virtual int32 GetBufferSize();\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL GetVolHeader(uint8 *volHeader, int32 *size, int32 layer);\n\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL EncodeFrame(TPVVideoInputData  *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes\n#ifdef PVAUTHOR_PROFILING\n                , void *aParam1 = 0\n#endif\n                                                        );\n\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL FlushOutput(TPVVideoOutputData *aVidOut);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL Terminate();\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateBitRate(int32 aNumLayer, int32 *aBitRate);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateFrameRate(int32 aNumLayer, float *aFrameRate);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL UpdateIFrameInterval(int32 aIFrameInterval);\n        OSCL_IMPORT_REF virtual TCVEI_RETVAL IFrameRequest();\n\n        /** Set the forced number of intra macroblock per frame for error resiliency. 
*/\n        OSCL_IMPORT_REF TCVEI_RETVAL SetIntraMBRefresh(int32 aNumMBRefresh);\n\n        OSCL_IMPORT_REF virtual int32 GetEncodeWidth(int32 aLayer);\n        OSCL_IMPORT_REF virtual int32 GetEncodeHeight(int32 aLayer);\n        OSCL_IMPORT_REF virtual float GetEncodeFrameRate(int32 aLayer);\n    private:\n\n        CPVM4VEncoder();\n        bool Construct(int32 aThreadId);\n#ifdef  YUV_INPUT\n        void CopyToYUVIn(uint8 *YUV, int width, int height, int width_16, int height_16);\n#endif\n\n        /** Color conversion instance RGB24/RGB12/YUV420SEMI to YUV 420 */\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)\n        ColorConvertBase *ccRGBtoYUV;\n#endif\n\n#ifdef FOR_3GPP_COMPLIANCE\n        void Check3GPPCompliance(TPVVideoEncodeParam *aEncParam, int *aEncWidth, int *aEncHeight);\n#endif\n\n        /* Parsing FSI */\n        TCVEI_RETVAL ParseFSI(uint8* aFSIBuff, int FSILength, VideoEncOptions *aEncOption);\n        int16 ShowBits(mp4StreamType *pStream, uint8 ucNBits, uint32 *pulOutData);\n        int16 FlushBits(mp4StreamType *pStream, uint8 ucNBits);\n        int16 ReadBits(mp4StreamType *pStream, uint8 ucNBits, uint32 *pulOutData);\n        int16 ByteAlign(mp4StreamType *pStream);\n        int16 iDecodeShortHeader(mp4StreamType *psBits, VideoEncOptions *aEncOption);\n\n\n        /* Pure virtuals from OsclActiveObject implemented in this derived class */\n        virtual void Run(void);\n        virtual void DoCancel(void);\n        MPVCVEIObserver *iObserver;\n\n        int     iSrcWidth;\n        int     iSrcHeight;\n        int     iSrcFrameRate;\n        int     iFrameOrientation;\n        int     iEncWidth[4];\n        int     iEncHeight[4];\n        float   iEncFrameRate[4];\n        TPVVideoFormat  iVideoFormat;\n\n        /* variables needed in operation */\n        VideoEncControls iEncoderControl;\n        bool    iInitialized;\n        uint8   *iYUVIn;\n        uint8   *iVideoIn;\n        uint8   
*iVideoOut;\n        TPVVideoOutputData *iOutputData[KCVEIMaxOutputBuffer];\n        int32       iNumOutputData;\n        uint32      iTimeStamp;\n        uint32      iNextModTime;\n        uint8   *iOverrunBuffer;\n        int     iOBSize;\n\n        /* Tables in color conversion */\n        uint8  *iY_Table;\n        uint16 *iCb_Table, *iCr_Table, *ipCb_Table, *ipCr_Table;\n\n\n        int     iNumLayer;\n};\n\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_base.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_H_INCLUDED\n#define OSCL_BASE_H_INCLUDED\n\n#include \"oscl_config.h\"\n#include \"oscl_types.h\"\n#include \"oscl_error.h\"\n\nclass OsclBase\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclErrorTrap\n{\n    public:\n\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n        OSCL_IMPORT_REF  static void leave(int) {};\n};\n\nclass OsclMem\n{\n    public:\n        OSCL_IMPORT_REF  static void Init() {};\n        OSCL_IMPORT_REF  static void Cleanup() {};\n};\n\nclass OsclRequestStatus\n{\n    public:\n        OsclRequestStatus();\n        OsclRequestStatus(int32 aVal)\n        {\n            (void)(aVal);\n        };\n        int32 operator=(int32 aVal);\n        int32 operator==(int32 aVal) const;\n        int32 operator!=(int32 aVal) const;\n        int32 operator>=(int32 aVal) const;\n        int32 operator<=(int32 aVal) const;\n        int32 operator>(int32 aVal) const;\n        int32 operator<(int32 aVal) const;\n        int32 Int() const;\n    private:\n        int32 iStatus;\n};\n\nclass OsclActiveObject\n{\n    public:\n        /**\n    
     * Scheduling priorities.\n         */\n        enum TPriority\n        {\n            /**\n            A low priority, useful for active objects representing\n            background processing.\n            */\n            EPriorityIdle = -100,\n            /**\n            A priority higher than EPriorityIdle but lower than EPriorityStandard.\n            */\n            EPriorityLow = -20,\n            /**\n            Most active objects will have this priority.\n            */\n            EPriorityStandard = 0,\n            /**\n            A priority higher than EPriorityStandard; useful for active objects\n            handling user input.\n            */\n            EPriorityUserInput = 10,\n            /**\n            A priority higher than EPriorityUserInput.\n            */\n            EPriorityHigh = 20\n        };\n\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (inpup param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclActiveObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n        OSCL_IMPORT_REF virtual ~OsclActiveObject();\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Set request active for this AO and set the status to pending.\n         * PendForExec is identical to SetBusy, but it\n         * additionally sets the request status to OSCL_REQUEST_PENDING.\n         *\n         */\n        OSCL_IMPORT_REF void PendForExec();\n\n        /**\n       
  * Complete the active request for the AO.  Can be\n         * called from any thread.\n         * @param aStatus: request completion status.\n         */\n        OSCL_IMPORT_REF void PendComplete(int32 aStatus);\n\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n         * Complete this AO's request immediately.\n         * If the AO is already active, this will do nothing.\n         * Will panic if the AO is not added to any scheduler,\n         * or if the calling thread context does not match the\n         * scheduling thread.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady();\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  
The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  The default routine will complete\n         * the request.  If any additional action is needed,\n         * the derived class may override this.  If the derived class\n         * does override DoCancel, it must complete the request.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  
The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n\nclass OsclTimerObject\n{\n    public:\n        /**\n         * Constructor.\n         * @param aPriority (input param): scheduling priority\n         * @param name (input param): optional name for this AO.\n         */\n        OSCL_IMPORT_REF OsclTimerObject(int32 aPriority, const char name[]);\n\n        /**\n         * Destructor.\n         */\n\n        //OSCL_IMPORT_REF virtual ~OsclTimerObject();\n\n        /**\n         * Add this AO to the current thread's scheduler.\n         */\n        OSCL_IMPORT_REF void AddToScheduler();\n\n        /**\n         * Return true if this AO is added to the scheduler,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsAdded() const;\n\n        /**\n         * Remove this AO from its scheduler.\n         * Will panic if the calling thread context does\n         * not match the scheduling thread.\n         * Cancels any active request before removing.\n         */\n        OSCL_IMPORT_REF void RemoveFromScheduler();\n\n        /**\n         * Deque is identical to RemoveFromScheduler\n         * It's only needed to prevent accidental usage\n         * of Symbian CActive::Deque.\n         */\n        OSCL_IMPORT_REF void Deque();\n\n        /**\n        * 'After' sets the request active, with request status\n        * OSCL_REQUEST_STATUS_PENDING, and starts a timer.\n        * When the timer expires, the request will complete with\n        * status OSCL_REQUEST_ERR_NONE.\n        * Must be called from the same thread in which the\n        * active object is scheduled.\n        * Will panic if the request is already active, the object\n        * is not added to any scheduler, or the calling thread\n        * does not match 
the scheduling thread.\n        * @param anInterval: timeout interval in microseconds.\n        */\n        OSCL_IMPORT_REF void After(int32 aDelayMicrosec);\n\n        /**\n         * Complete the request after a time interval.\n         * RunIfNotReady is identical to After() except that it\n         * first checks the request status, and if it is already\n         * active, it does nothing.\n         *\n         * @param aDelayMicrosec (input param): delay in microseconds.\n         */\n        OSCL_IMPORT_REF void RunIfNotReady(uint32 aDelayMicrosec = 0);\n\n        /**\n         * Set request active for this AO.\n         * Will panic if the request is already active,\n         * or the active object is not added to any scheduler,\n         * or the calling thread context does not match\n         * the scheduler thread.\n         */\n        OSCL_IMPORT_REF void SetBusy();\n\n        /**\n         * Return true if this AO is active,\n         * false otherwise.\n         */\n        OSCL_IMPORT_REF bool IsBusy() const;\n\n        /**\n         * Cancel any active request.\n         * If the request is active, this will call the DoCancel\n         * routine, wait for the request to cancel, then set the\n         * request inactive.  The AO will not run.\n         * If the request is not active, it does nothing.\n         * Request must be canceled from the same thread\n         * in which it is scheduled.\n         */\n        OSCL_IMPORT_REF void Cancel();\n\n        /**\n        * Return scheduling priority of this active object.\n        */\n        OSCL_IMPORT_REF int32 Priority() const;\n        /**\n        * Request status access\n        */\n        OSCL_IMPORT_REF int32 Status()const;\n        OSCL_IMPORT_REF void SetStatus(int32);\n        OSCL_IMPORT_REF int32 StatusRef();\n\n    protected:\n        /**\n         * Cancel request handler.\n         * This gets called by scheduler when the request\n         * is cancelled.  
The default routine will cancel\n         * the timer.  If any additional action is needed,\n         * the derived class may override this.  If the\n         * derived class does override this, it should explicitly\n         * call OsclTimerObject::DoCancel in its own DoCancel\n         * routine.\n         */\n        //OSCL_IMPORT_REF virtual void DoCancel();\n\n        /**\n        * Run Error handler.\n        * This gets called by scheduler when the Run routine leaves.\n        * The default implementation simply returns the leave code.\n        * If the derived class wants to handle errors from Run,\n        * it may override this.  The RunError should return OsclErrNone\n        * if it handles the error, otherwise it should return the\n        * input error code.\n        * @param aError: the leave code generated by the Run.\n        */\n        //OSCL_IMPORT_REF virtual int32 RunError(int32 aError);\n};\n\n#endif // OSCL_BASE_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_base_macros.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_BASE_MACROS_H_INCLUDED\n#define OSCL_BASE_MACROS_H_INCLUDED\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#endif // OSCL_BASE_MACROS_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_config.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_CONFIG_H_INCLUDED\n#define OSCL_CONFIG_H_INCLUDED\n\n#define OSCL_HAS_BREW_SUPPORT 0   //Not yet supported\n\n#define OSCL_HAS_SYMBIAN_SUPPORT 0 // Not yet supported\n\n#define OSCL_HAS_LINUX_SUPPORT 1\n\n#endif // OSCL_CONFIG_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_dll.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_DLL_H_INCLUDED\n#define OSCL_DLL_H_INCLUDED\n\n#define OSCL_DLL_ENTRY_POINT() void oscl_dll_entry_point() {}\n\n\n/**\n * Default DLL entry/exit point function.\n *\n * The body of the DLL entry point is given.  The macro\n * only needs to be declared within the source file.\n *\n * Usage :\n *\n * OSCL_DLL_ENTRY_POINT_DEFAULT()\n */\n\n#define OSCL_DLL_ENTRY_POINT_DEFAULT()\n\n\n\n#endif // OSCL_DLL_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_error.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_ERROR_H_INCLUDED\n#define OSCL_ERROR_H_INCLUDED\n\n\n#define OSCL_LEAVE(x)\n\n\n#endif //OSCL_ERROR_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_error_codes.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_ERROR_CODES_H_INCLUDED\n#define OSCL_ERROR_CODES_H_INCLUDED\n\n\n/** Leave Codes\n*/\ntypedef int32 OsclLeaveCode;\n\n#define OsclErrNone 0\n#define OsclErrGeneral 100\n#define OsclErrNoMemory 101\n#define OsclErrCancelled 102\n#define OsclErrNotSupported 103\n#define OsclErrArgument 104\n#define OsclErrBadHandle 105\n#define OsclErrAlreadyExists 106\n#define OsclErrBusy 107\n#define OsclErrNotReady 108\n#define OsclErrCorrupt 109\n#define OsclErrTimeout 110\n#define OsclErrOverflow 111\n#define OsclErrUnderflow 112\n#define OsclErrInvalidState 113\n#define OsclErrNoResources 114\n\n/** For backward compatibility with old definitions\n*/\n#define OSCL_ERR_NONE OsclErrNone\n#define OSCL_BAD_ALLOC_EXCEPTION_CODE OsclErrNoMemory\n\n/** Return Codes\n*/\ntypedef int32 OsclReturnCode;\n\n#define  OsclSuccess 0\n#define  OsclPending 1\n#define  OsclFailure -1\n\n#endif\n\n/*! @} */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_exception.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef OSCL_EXCEPTION_H_INCLUDED\n#define OSCL_EXCEPTION_H_INCLUDED\n\n\n\n#endif // INCLUDED_OSCL_EXCEPTION_H\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_math.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MATH_H_INCLUDED\n#define OSCL_MATH_H_INCLUDED\n\n#include <math.h>\n\n\n\n#define oscl_pow        pow\n#define oscl_exp        exp\n#define oscl_sqrt       sqrt\n#define oscl_log        log\n#define oscl_cos        cos\n#define oscl_sin        sin\n#define oscl_tan        tan\n#define oscl_asin       asin\n\n#endif // OSCL_MATH_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_mem.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef OSCL_MEM_H_INCLUDED\n#define OSCL_MEM_H_INCLUDED\n\n#include \"oscl_types.h\"\n\n#define OSCLMemSizeT size_t\n\n#define oscl_memcpy(dest, src, count)       memcpy((void *)(dest), (const void *)(src), (OSCLMemSizeT)(count))\n#define oscl_memset(dest, ch, count)        memset((void *)(dest), (unsigned char)(ch), (OSCLMemSizeT)(count))\n#define oscl_memmove(dest, src, bytecount)  memmove((void *)(dest), (const void *)(src), (OSCLMemSizeT)(bytecount))\n#define oscl_memcmp(buf1, buf2, count)      memcmp( (const void *)(buf1), (const void *)(buf2), (OSCLMemSizeT)(count))\n#define oscl_malloc(size)                      malloc((OSCLMemSizeT)(size))\n#define oscl_free(memblock)                 free((void *)(memblock))\n#define OSCL_ARRAY_DELETE(ptr)              delete [] ptr\n#define OSCL_ARRAY_NEW(T, count)            new T[count]\n#define OSCL_DELETE(memblock)               delete memblock\n#define OSCL_NEW(arg)                       new arg\n\n#endif // OSCL_MEM_H_INCLUDED\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/oscl_types.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*! \\file oscl_types.h\n    \\brief This file contains basic type definitions for common use across platforms.\n\n*/\n\n\n\n#ifndef OSCL_TYPES_H_INCLUDED\n#define OSCL_TYPES_H_INCLUDED\n\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdarg.h>\n#include <string.h>\n#include <limits.h>\n#include <string.h>\n\n//! A typedef for a signed 8 bit integer.\n#ifndef int8\ntypedef signed char int8;\n#endif\n\n//! A typedef for an unsigned 8 bit integer.\n#ifndef uint8\ntypedef unsigned char uint8;\n#endif\n\n//! A typedef for a signed 16 bit integer.\n#ifndef int16\ntypedef short int16;\n#endif\n\n//! A typedef for an unsigned 16 bit integer.\n#ifndef uint16\ntypedef unsigned short uint16;\n#endif\n\n//! A typedef for a signed 32 bit integer.\n#ifndef int32\ntypedef long int32;\n#endif\n\n//! 
A typedef for an unsigned 32 bit integer.\n#ifndef uint32\ntypedef unsigned long uint32;\n#endif\n\n#ifndef sint8\ntypedef signed char sint8;\n#endif\n\n#ifndef OsclFloat\ntypedef float OsclFloat;\n#endif\n\n#ifndef uint\ntypedef unsigned int uint;\n#endif\n\n\n#ifndef int64\n#define OSCL_HAS_NATIVE_INT64_TYPE 1\n#define OSCL_NATIVE_INT64_TYPE long long\ntypedef OSCL_NATIVE_INT64_TYPE int64;\n#endif // int64\n\n#ifndef uint64\n#define OSCL_HAS_NATIVE_UINT64_TYPE  1\n#define OSCL_NATIVE_UINT64_TYPE unsigned long long\ntypedef OSCL_NATIVE_UINT64_TYPE uint64;\n#endif // uint64\n\n#ifndef OSCL_UNUSED_ARG\n#define OSCL_UNUSED_ARG(x) (void)(x)\n#endif\n\n#ifndef OSCL_EXPORT_REF\n#define OSCL_EXPORT_REF\n#endif\n\n#ifndef OSCL_IMPORT_REF\n#define OSCL_IMPORT_REF\n#endif\n\n#if defined(OSCL_DISABLE_INLINES)\n#define OSCL_INLINE\n#define OSCL_COND_EXPORT_REF OSCL_EXPORT_REF\n#define OSCL_COND_IMPORT_REF OSCL_IMPORT_REF\n#else\n#define OSCL_INLINE inline\n#define OSCL_COND_IMPORT_REF\n#define OSCL_COND_IMPORT_REF\n#endif\n\n#ifndef INT64\n#define INT64 int64\n#endif\n\n#ifndef UINT64\n#define UINT64 uint64\n#endif\n\n#ifndef UINT64_HILO\n#define UINT64_HILO(a,b) ((a<<32) | b)\n#endif\n\n\n#endif // OSCL_TYPES_H_INCLUDED\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/oscl/osclconfig_compiler_warnings.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n// -*- c++ -*-\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n//       O S C L C O N F I G _ C O M P I L E R  _ W A R N I N G S\n\n// = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =\n\n\n/*! \\file osclconfig_compiler_warnings.h\n *  \\brief This file contains the ability to turn off/on compiler warnings\n *\n */\n\n// This macro enables the \"#pragma GCC system_header\" found in any header file that\n// includes this config file.\n// \"#pragma GCC system_header\" suppresses compiler warnings in the rest of that header\n// file by treating the header as a system header file.\n// For instance, foo.h has 30 lines, \"#pragma GCC system_header\" is inserted at line 10,\n// from line 11 to the end of file, all compiler warnings are disabled.\n// However, this does not affect any files that include foo.h.\n//\n#ifdef __GNUC__\n#define OSCL_DISABLE_GCC_WARNING_SYSTEM_HEADER\n#endif\n\n#define OSCL_FUNCTION_PTR(x) (&x)\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/bitstream_io.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/* Date: 8/02/04                                                                */\n/* Description:                                                                 */\n/*  Change the bitstream parsing algorithm. Use temporary word of 2 or 4 bytes  */\n/*  before writing it to the bitstream buffer.                                  */\n/*  Note byteCount doesn't have to be multiple of 2 or 4                        */\n/*********************************************************************************/\n\n#include \"bitstream_io.h\"\n#include \"m4venc_oscl.h\"\n\nstatic const UChar Mask[ ] =\n{\n    0x00, 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3F, 0x7F, 0xFF\n};\n\n#define WORD_SIZE   4   /* for 32-bit machine */\n\n/*Note:\n    1. There is a problem when output the last bits(which can not form a byte yet\n    so when you output, you need to stuff to make sure it is a byte\n    2.  
I now hard coded byte to be 8 bits*/\n\n\n/* ======================================================================== */\n/*  Function : BitStreamCreateEnc(Int bufferSize )                          */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Create a bitstream to hold one encoded video packet or frame */\n/*  In/out   :                                                              */\n/*      bufferSize  :   size of the bitstream buffer in bytes               */\n/*  Return   : Pointer to the BitstreamEncVideo                             */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nBitstreamEncVideo *BitStreamCreateEnc(Int bufferSize)\n{\n    BitstreamEncVideo *stream;\n    stream = (BitstreamEncVideo *) M4VENC_MALLOC(sizeof(BitstreamEncVideo));\n    if (stream == NULL)\n    {\n        return NULL;\n    }\n    stream->bufferSize = bufferSize;\n    stream->bitstreamBuffer = (UChar *) M4VENC_MALLOC(stream->bufferSize * sizeof(UChar));\n    if (stream->bitstreamBuffer == NULL)\n    {\n        M4VENC_FREE(stream);\n        stream = NULL;\n        return NULL;\n    }\n    M4VENC_MEMSET(stream->bitstreamBuffer, 0, stream->bufferSize*sizeof(UChar));\n    stream->word = 0;\n#if WORD_SIZE==4\n    stream->bitLeft = 32;\n#else\n    stream->bitLeft = 16;\n#endif\n    stream->byteCount = 0;\n\n    stream->overrunBuffer = NULL;\n    stream->oBSize = 0;\n\n    return stream;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamCloseEnc( )                                         */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : close a bitstream                                            */\n/*  In/out   :\n        stream  :   the bitstream to be closed                              */\n/*  Return   :      
                                                        */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nVoid  BitstreamCloseEnc(BitstreamEncVideo *stream)\n{\n    if (stream)\n    {\n        if (stream->bitstreamBuffer)\n        {\n            M4VENC_FREE(stream->bitstreamBuffer);\n        }\n\n        M4VENC_FREE(stream);\n    }\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamPutBits(BitstreamEncVideo *stream, Int Length,\n                         Int Value)                                         */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : put Length (1-16) number of bits to the stream               */\n/*            for 32-bit machine this function can do upto 32 bit input     */\n/*  In/out   :                                                              */\n/*      stream      the bitstream where the bits are put in                 */\n/*      Length      bits length (should belong to 1 to 16)                  */\n/*      Value       those bits value                                        */\n/*  Return   :  PV_STATUS                                                   */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value)\n{\n    PV_STATUS status;\n\n    if (stream->bitLeft > Length)\n    {\n        stream->word <<= Length;\n        stream->word |= Value;  /* assuming Value is not larger than Length */\n        stream->bitLeft -= Length;\n        return PV_SUCCESS;\n    }\n    else\n    {\n\n        stream->word <<= stream->bitLeft;\n        Length -= stream->bitLeft;\n        stream->word |= ((UInt)Value >> Length);\n\n        status = 
BitstreamSaveWord(stream);\n        if (status != PV_SUCCESS)\n        {\n            return status;\n        }\n\n        /* we got new Length and Value */\n        /* note that Value is not \"clean\" because of msb are not masked out */\n        stream->word = Value;\n        stream->bitLeft -= Length;\n        /* assuming that Length is no more than 16 bits */\n        /* stream->bitLeft should be greater than zero at this point */\n        //if(stream->bitLeft<=0)\n        //  exit(-1);\n        return PV_SUCCESS;\n    }\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, UInt32 Value)    */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Use this function to put Length (17-32) number of bits to    */\n/*              for 16-bit machine  the stream.                             */\n/*  In/out   :                                                              */\n/*      stream      the bitstream where the bits are put in                 */\n/*      Length      bits length (should belong to 17 to 32)                 */\n/*      Value       those bits value                                        */\n/*  Return   :  PV_STATUS                                                   */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value)\n{\n    PV_STATUS status;\n    UInt topValue;\n    Int topLength;\n\n    topValue = (Value >> 16);\n    topLength = Length - 16;\n\n    if (topLength > 0)\n    {\n        status = BitstreamPutBits(stream, topLength, topValue);\n\n        if (status != PV_SUCCESS)\n        {\n            return status;\n        }\n\n        status = BitstreamPutBits(stream, 16, (UInt)(Value & 0xFFFF));\n\n        
return status;\n    }\n    else\n    {\n        status = BitstreamPutBits(stream, Length, (UInt)Value);\n        return status;\n    }\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamSaveWord                                            */\n/*  Date     : 08/03/2004                                                   */\n/*  Purpose  : save written word into the bitstream buffer.                 */\n/*  In/out   :                                                              */\n/*      stream      the bitstream where the bits are put in                 */\n/*  Return   :  PV_STATUS                                                   */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream)\n{\n    UChar *ptr;\n    UInt word;\n\n    /* assume that stream->bitLeft is always zero when this function is called */\n    if (stream->byteCount + WORD_SIZE > stream->bufferSize)\n    {\n        if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, WORD_SIZE))\n        {\n            stream->byteCount += WORD_SIZE;\n            return PV_FAIL;\n        }\n    }\n\n    ptr = stream->bitstreamBuffer + stream->byteCount;\n    word = stream->word;\n    stream->word = 0; /* important to reset to zero */\n\n    /* NOTE: byteCount does not have to be multiple of 2 or 4 */\n#if (WORD_SIZE == 4)\n    *ptr++ = word >> 24;\n    *ptr++ = 0xFF & (word >> 16);\n#endif\n\n    *ptr++ = 0xFF & (word >> 8);\n    *ptr = 0xFF & word;\n\n#if (WORD_SIZE == 4)\n    stream->byteCount += 4;\n    stream->bitLeft = 32;\n#else\n    stream->byteCount += 2;\n    stream->bitLeft = 16;\n#endif\n\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamSavePartial                                         */\n/*  Date    
 : 08/03/2004                                                   */\n/*  Purpose  : save unfinished written word into the bitstream buffer.      */\n/*  In/out   :                                                              */\n/*      stream      the bitstream where the bits are put in                 */\n/*  Return   :  PV_STATUS                                                   */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction)\n{\n    UChar *ptr;\n    UInt word, shift;\n    Int numbyte, bitleft, bitused;\n\n    bitleft = stream->bitLeft;\n    bitused = (WORD_SIZE << 3) - bitleft; /* number of bits used */\n    numbyte = bitused >> 3; /* number of byte fully used */\n\n    if (stream->byteCount + numbyte > stream->bufferSize)\n    {\n        if (PV_SUCCESS != BitstreamUseOverrunBuffer(stream, numbyte))\n        {\n            stream->byteCount += numbyte;\n            return PV_FAIL;\n        }\n    }\n\n    ptr = stream->bitstreamBuffer + stream->byteCount;\n    word = stream->word;\n    word <<= bitleft;   /* word is not all consumed */\n    bitleft = bitused - (numbyte << 3); /* number of bits used (fraction) */\n    stream->byteCount += numbyte;\n    if (bitleft)\n    {\n        *fraction = 1;\n    }\n    else\n    {\n        *fraction = 0;\n    }\n    bitleft = (WORD_SIZE << 3) - bitleft;\n    /* save new value */\n    stream->bitLeft = bitleft;\n\n    shift = ((WORD_SIZE - 1) << 3);\n    while (numbyte)\n    {\n        *ptr++ = (UChar)((word >> shift) & 0xFF);\n        word <<= 8;\n        numbyte--;\n    }\n\n    if (*fraction)\n    {// this could lead to buffer overrun when ptr is already out of bound.\n        //  *ptr = (UChar)((word>>shift)&0xFF); /* need to do it for the last fractional byte */\n    }\n\n    /* save new values */\n    stream->word = word >> 
bitleft;\n\n    /* note we don't update byteCount, bitLeft and word */\n    /* so that encoder can continue PutBits if they don't */\n\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : BitstreamShortHeaderByteAlignStuffing(                       */\n/*                                      BitstreamEncVideo *stream)          */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : bit stuffing for next start code in short video header       */\n/*  In/out   :                                                              */\n/*  Return   :  number of bits to be stuffed                                */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nInt BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream)\n{\n    UInt restBits;\n    Int fraction;\n\n    restBits = (stream->bitLeft & 0x7); /* modulo 8 */\n\n    if (restBits)  /*short_video_header[0] is 1 in h263 baseline*/\n    {\n        /* H.263 style stuffing */\n        BitstreamPutBits(stream, restBits, 0);\n    }\n\n    if (stream->bitLeft != (WORD_SIZE << 3))\n    {\n        BitstreamSavePartial(stream, &fraction);\n    }\n\n    return restBits;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream)   */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : bit stuffing for next start code in MPEG-4                  */\n/*  In/out   :                                                              */\n/*  Return   :  number of bits to be stuffed                                */\n/*  Modified :                                                              */\n/* 
======================================================================== */\nInt BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream)\n{\n\n    UInt restBits;\n    Int fraction;\n    /* Question: in MPEG-4 , short_video_header[0]==0 => even already byte aligned, will still stuff 8 bits\n       need to check with  */\n    /*if (!(getPointerENC(index1, index2)%8) && short_video_header[0]) return 0;*/\n\n    /* need stuffing bits, */\n    BitstreamPutBits(stream, 1, 0);\n\n    restBits = (stream->bitLeft & 0x7); /* modulo 8 */\n\n    if (restBits)  /*short_video_header[0] is 1 in h263 baseline*/\n    {\n        /* need stuffing bits, */\n        BitstreamPutBits(stream, restBits, Mask[restBits]);\n    }\n\n    if (stream->bitLeft != (WORD_SIZE << 3))\n    {\n        BitstreamSavePartial(stream, &fraction);\n    }\n\n    return (restBits);\n}\n\n/*does bit stuffing for next resync marker*/\n/*  does bit stuffing for next resync marker\n *                                            \"0\"\n *                                           \"01\"\n *                                          \"011\"\n *                                         \"0111\"\n *                                        \"01111\"\n *                                       \"011111\"\n *                                      \"0111111\"\n *                                     \"01111111\"   (8-bit codeword)\n */\n\n/*Int BitstreamNextResyncMarkerEnc(BitstreamEncVideo *stream)\n{\n  Int count;\n  BitstreamPut1Bits(stream,0);\n  count=8-stream->totalBits & 8;\n  BitstreamPutBits(stream,count,Mask[count]);\n  return count;\n}*/\n\n/* ======================================================================== */\n/*  Function : BitstreamAppendEnc( BitstreamEncVideo *bitstream1,           */\n/*                                      BitstreamEncVideo *bitstream2   )   */\n/*  Date     : 08/29/2000                                                   */\n/*  Purpose  : Append the intermediate bitstream 
(bitstream2) to the end of */\n/*                              output bitstream(bitstream1)                */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\n\nPV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)\n{\n    PV_STATUS status;\n    UChar *ptrBS2, *ptrBS1;\n    UChar byteBS2, byteBS1;\n    Int  numbyte2;\n    Int bitused, bitleft, offset, fraction;\n\n    status = BitstreamSavePartial(bitstream1, &fraction);\n    if (status != PV_SUCCESS)\n    {\n        return status;\n    }\n\n    offset = fraction;\n    status = BitstreamSavePartial(bitstream2, &fraction);\n    if (status != PV_SUCCESS)\n    {\n        return status;\n    }\n\n    if (!offset) /* bitstream1 is byte-aligned */\n    {\n        return BitstreamAppendPacket(bitstream1, bitstream2);\n    }\n\n    offset += fraction;\n\n    /* since bitstream1 doesn't have to be byte-aligned, we have to process byte by byte */\n    /* we read one byte from bitstream2 and use BitstreamPutBits to do the job */\n    if (bitstream1->byteCount + bitstream2->byteCount + offset > bitstream1->bufferSize)\n    {\n        if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount + offset))\n        {\n            bitstream1->byteCount += (bitstream2->byteCount + offset);\n            return PV_FAIL;\n        }\n    }\n\n    ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/\n    ptrBS2 = bitstream2->bitstreamBuffer;\n\n    bitused = (WORD_SIZE << 3) - bitstream1->bitLeft; /* this must be between 1-7 */\n    bitleft = 8 - bitused;\n\n    numbyte2 = bitstream2->byteCount;   /* number of byte to copy from bs2 */\n    bitstream1->byteCount += numbyte2;  /* new 
byteCount */\n\n    byteBS1 = ((UChar) bitstream1->word) << bitleft;    /* fraction byte from bs1 */\n\n    while (numbyte2)\n    {\n        byteBS2 = *ptrBS2++;\n        byteBS1 |= (byteBS2 >> bitused);\n        *ptrBS1++ = byteBS1;\n        byteBS1 = byteBS2 << bitleft;\n        numbyte2--;\n    }\n\n    bitstream1->word = byteBS1 >> bitleft;  /* bitstream->bitLeft remains the same */\n\n    /* now save bs2->word in bs1 */\n    status = BitstreamPutBits(bitstream1, (WORD_SIZE << 3) - bitstream2->bitLeft, bitstream2->word);\n\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamAppendPacket( BitstreamEncVideo *bitstream1,        */\n/*                                      BitstreamEncVideo *bitstream2   )   */\n/*  Date     : 05/31/2001                                                   */\n/*  Purpose  : Append the intermediate bitstream (bitstream2) to the end of */\n/*              output bitstream(bitstream1) knowing that bitstream1 is byte-aligned*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)\n{\n    UChar *ptrBS2, *ptrBS1;\n    Int  numbyte2;\n\n    if (bitstream1->byteCount + bitstream2->byteCount  > bitstream1->bufferSize)\n    {\n        if (PV_SUCCESS != BitstreamUseOverrunBuffer(bitstream1, bitstream2->byteCount))\n        {\n            bitstream1->byteCount += bitstream2->byteCount; /* legacy, to keep track of total bytes */\n            return PV_FAIL;\n        }\n    }\n\n    ptrBS1 = bitstream1->bitstreamBuffer + bitstream1->byteCount; /* move ptr bs1*/\n    ptrBS2 = bitstream2->bitstreamBuffer;\n\n    
numbyte2 = bitstream2->byteCount;\n    bitstream1->byteCount += numbyte2; /* new byteCount */\n\n    /*copy all the bytes in bitstream2*/\n    M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);\n\n    bitstream1->word = bitstream2->word;  /* bitstream1->bitLeft is the same */\n    bitstream1->bitLeft = bitstream2->bitLeft;\n\n    return PV_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamAppendPacketNoOffset( BitstreamEncVideo *bitstream1,*/\n/*                                      BitstreamEncVideo *bitstream2   )   */\n/*  Date     : 04/23/2002                                                   */\n/*  Purpose  : Append the intermediate bitstream (bitstream2) to the end of */\n/*              output bitstream(bitstream1) , for slice-based coding only */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)\n{\n    PV_STATUS status = PV_SUCCESS;\n    UChar *ptrBS2, *ptrBS1;\n    Int  numbyte2;\n    Int  byteleft;\n\n    numbyte2 = bitstream2->byteCount;\n\n    if (bitstream1->byteCount + bitstream2->byteCount > bitstream1->bufferSize)\n    {\n        numbyte2 =  bitstream1->bufferSize - bitstream1->byteCount;\n        status =  PV_END_OF_BUF;    /* signal end of buffer */\n    }\n\n    ptrBS1 = bitstream1->bitstreamBuffer; /* move ptr bs1*/\n    ptrBS2 = bitstream2->bitstreamBuffer;\n\n    bitstream1->byteCount += numbyte2; /* should be equal to bufferSize */\n\n    /*copy all the bytes in bitstream2*/\n    M4VENC_MEMCPY(ptrBS1, ptrBS2, sizeof(UChar)*numbyte2);\n    bitstream1->word = 0;\n    bitstream1->bitLeft = (WORD_SIZE << 
3);\n\n    if (status == PV_END_OF_BUF) /* re-position bitstream2 */\n    {\n        byteleft = bitstream2->byteCount - numbyte2;\n\n        M4VENC_MEMCPY(ptrBS2, ptrBS2 + numbyte2, sizeof(UChar)*byteleft);\n\n        bitstream2->byteCount = byteleft;\n        /* bitstream2->word and bitstream->bitLeft are unchanged.\n           they should be 0 and (WORD_SIZE<<3) */\n    }\n\n    return status;\n}\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : BitstreamRepos( BitstreamEncVideo *bitstream,                */\n/*                                      Int byteCount, Int bitCount)        */\n/*  Date     : 04/28/2002                                                   */\n/*  Purpose  : Reposition the size of the buffer content (curtail)          */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS   BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount)\n{\n    UChar *ptr, byte;\n    UInt word;\n    Int fraction;\n\n    BitstreamSavePartial(bitstream, &fraction);\n\n    bitstream->byteCount = byteCount;\n    ptr = bitstream->bitstreamBuffer + byteCount; /* get fraction of the byte */\n    if (bitCount)\n    {\n        bitstream->bitLeft = (WORD_SIZE << 3) - bitCount; /* bitCount should be 0-31 */\n        word = *ptr++;\n        byte = *ptr++;\n        word = byte | (word << 8);\n#if (WORD_SIZE == 4)\n        byte = *ptr++;\n        word = byte | (word << 8);\n        byte = *ptr++;\n        word = byte | (word << 8);\n#endif\n        bitstream->word = word >> (bitstream->bitLeft);\n    }\n    else\n    {\n        bitstream->word = 0;\n        bitstream->bitLeft = (WORD_SIZE << 3);\n    }\n\n    return 
PV_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamFlushBits(BitstreamEncVideo *bitstream1,            */\n/*                              Int num_bit_left)                           */\n/*  Date     : 04/24/2002                                                   */\n/*  Purpose  : Flush buffer except the last num_bit_left bits.              */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\n\nPV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left)\n{\n    Int i;\n    UChar *ptrDst, *ptrSrc;\n    Int leftover, bitused;\n    Int new_byte = (num_bit_left >> 3);\n    Int new_bit = num_bit_left - (new_byte << 3); /* between 0-7 */\n\n    ptrSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount;\n    ptrDst = bitstream1->bitstreamBuffer;\n\n    bitused = (WORD_SIZE << 3) - bitstream1->bitLeft;\n\n    leftover = 8 - bitused; /* bitused should be between 0-7 */\n\n    bitstream1->byteCount = new_byte;\n    bitstream1->bitLeft = (WORD_SIZE << 3) - new_bit;\n\n    if (!bitused) /* byte aligned */\n    {\n        M4VENC_MEMCPY(ptrDst, ptrSrc, new_byte + 1);\n    }\n    else\n    {\n        /*copy all the bytes in bitstream2*/\n        for (i = 0; i < new_byte; i++)\n        {\n            *ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover);\n            ptrSrc++;\n        }\n        /* copy for the last byte of ptrSrc, copy extra bits doesn't hurt */\n        if (new_bit)\n        {\n            *ptrDst++ = (ptrSrc[0] << bitused) | (ptrSrc[1] >> leftover);\n            ptrSrc++;\n        }\n    }\n    if (new_bit)\n    {\n        ptrSrc = bitstream1->bitstreamBuffer + new_byte;\n        
bitstream1->word = (*ptrSrc) >> (8 - new_bit);\n    }\n\n    return PV_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : BitstreamPrependPacket( BitstreamEncVideo *bitstream1,       */\n/*                                      BitstreamEncVideo *bitstream2   )   */\n/*  Date     : 04/26/2002                                                   */\n/*  Purpose  : Prepend the intermediate bitstream (bitstream2) to the beginning of */\n/*              output bitstream(bitstream1) */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2)\n{\n    UChar *pSrc, *pDst, byte;\n    Int     movebyte, bitused, leftover, i, fraction;\n\n    BitstreamSavePartial(bitstream2, &fraction); /* make sure only fraction of byte left */\n    BitstreamSavePartial(bitstream1, &fraction);\n\n    if (bitstream1->byteCount + bitstream2->byteCount >= bitstream1->bufferSize)\n    {\n        bitstream1->byteCount += bitstream2->byteCount;\n        return PV_END_OF_BUF;\n    }\n\n    movebyte = bitstream1->byteCount;\n    if (movebyte < bitstream2->byteCount)\n        movebyte = bitstream2->byteCount;\n    movebyte++;\n\n    /* shift bitstream1 to the right by movebyte */\n    pSrc = bitstream1->bitstreamBuffer;\n    pDst = pSrc + movebyte;\n\n    M4VENC_MEMCPY(pDst, pSrc, bitstream1->byteCount + 1);\n\n    /* copy bitstream2 to the beginning of bitstream1 */\n    M4VENC_MEMCPY(pSrc, bitstream2->bitstreamBuffer, bitstream2->byteCount + 1);\n\n    /* now shift back previous bitstream1 buffer to the end */\n    pSrc = pDst;\n    pDst = bitstream1->bitstreamBuffer + 
bitstream2->byteCount;\n\n    bitused = (WORD_SIZE << 3) - bitstream2->bitLeft;\n    leftover = 8 - bitused;     /* bitused should be 0-7 */\n\n    byte = (bitstream2->word) << leftover;\n\n    *pDst++ = byte | (pSrc[0] >> bitused);\n\n    for (i = 0; i < bitstream1->byteCount + 1; i++)\n    {\n        *pDst++ = ((pSrc[0] << leftover) | (pSrc[1] >> bitused));\n        pSrc++;\n    }\n\n    bitstream1->byteCount += bitstream2->byteCount;\n    //bitstream1->bitCount += bitstream2->bitCount;\n    bitused = (WORD_SIZE << 4) - (bitstream1->bitLeft + bitstream2->bitLeft);\n\n    if (bitused >= 8)\n    {\n        bitused -= 8;\n        bitstream1->byteCount++;\n    }\n\n    bitstream1->bitLeft = (WORD_SIZE << 3) - bitused;\n\n    bitstream2->byteCount = bitstream2->word = 0;\n    bitstream2->bitLeft = (WORD_SIZE << 3);\n\n    pSrc = bitstream1->bitstreamBuffer + bitstream1->byteCount;\n    leftover = 8 - bitused;\n    //*pSrc = (pSrc[0]>>leftover)<<leftover; /* make sure the rest of bits are zeros */\n\n    bitstream1->word = (UInt)((pSrc[0]) >> leftover);\n\n    return PV_SUCCESS;\n}\n#endif  /* NO_SLICE_ENCODE */\n\n\n/* ======================================================================== */\n/*  Function : BitstreamGetPos( BitstreamEncVideo *stream                   */\n/*  Date     : 08/05/2004                                                   */\n/*  Purpose  : Get the bit position.                                        
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nInt BitstreamGetPos(BitstreamEncVideo *stream)\n{\n\n    return stream->byteCount*8 + (WORD_SIZE << 3) - stream->bitLeft;\n}\n\nvoid BitstreamEncReset(BitstreamEncVideo *stream)\n{\n    stream->bitLeft = (WORD_SIZE << 3);\n    stream->word = 0;\n    stream->byteCount = 0;\n    return ;\n}\n\n/* This function set the overrun buffer, and VideoEncData context for callback to reallocate\noverrun buffer.  */\nVoid  BitstreamSetOverrunBuffer(BitstreamEncVideo* stream, UChar* overrunBuffer, Int oBSize, VideoEncData *video)\n{\n    stream->overrunBuffer = overrunBuffer;\n    stream->oBSize = oBSize;\n    stream->video = video;\n\n    return ;\n}\n\n\n/* determine whether overrun buffer can be used or not */\nPV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes)\n{\n    VideoEncData *video = stream->video;\n\n    if (stream->overrunBuffer != NULL) // overrunBuffer is set\n    {\n        if (stream->bitstreamBuffer != stream->overrunBuffer) // not already used\n        {\n            if (stream->byteCount + numExtraBytes >= stream->oBSize)\n            {\n                stream->oBSize = stream->byteCount + numExtraBytes + 100;\n                stream->oBSize &= (~0x3); // make it multiple of 4\n\n                // allocate new overrun Buffer\n                if (video->overrunBuffer)\n                {\n                    M4VENC_FREE(video->overrunBuffer);\n                }\n                video->oBSize = stream->oBSize;\n                video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);\n                stream->overrunBuffer = video->overrunBuffer;\n                if (stream->overrunBuffer == 
NULL)\n                {\n                    return PV_FAIL;\n                }\n            }\n\n            // copy everything to overrun buffer and start using it.\n            oscl_memcpy(stream->overrunBuffer, stream->bitstreamBuffer, stream->byteCount);\n            stream->bitstreamBuffer = stream->overrunBuffer;\n            stream->bufferSize = stream->oBSize;\n        }\n        else // overrun buffer is already used\n        {\n            if (stream->byteCount + numExtraBytes >= stream->oBSize)\n            {\n                stream->oBSize = stream->byteCount + numExtraBytes + 100;\n            }\n\n            // allocate new overrun buffer\n            stream->oBSize &= (~0x3); // make it multiple of 4\n            video->oBSize = stream->oBSize;\n            video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * stream->oBSize);\n            if (video->overrunBuffer == NULL)\n            {\n                return PV_FAIL;\n            }\n\n            // copy from the old buffer to new buffer\n            oscl_memcpy(video->overrunBuffer, stream->overrunBuffer, stream->byteCount);\n            // free old buffer\n            M4VENC_FREE(stream->overrunBuffer);\n            // assign pointer to new buffer\n            stream->overrunBuffer = video->overrunBuffer;\n            stream->bitstreamBuffer = stream->overrunBuffer;\n            stream->bufferSize = stream->oBSize;\n        }\n\n        return PV_SUCCESS;\n    }\n    else // overrunBuffer is not enable.\n    {\n        return PV_FAIL;\n    }\n\n}\n\n\n\n\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/bitstream_io.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _BITSTREAM_IO_H_\n#define _BITSTREAM_IO_H_\n\n#define BitstreamPut1Bits(x,y)  BitstreamPutBits(x,1,y)\n#define BitstreamPutGT8Bits(x,y,z) BitstreamPutBits(x,y,z)\n\n#include \"mp4lib_int.h\"\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    BitstreamEncVideo *BitStreamCreateEnc(Int bufferSize);\n    Void  BitstreamCloseEnc(BitstreamEncVideo *stream);\n    PV_STATUS BitstreamPutBits(BitstreamEncVideo *stream, Int Length, UInt Value);\n    PV_STATUS BitstreamPutGT16Bits(BitstreamEncVideo *stream, Int Length, ULong Value);\n    PV_STATUS BitstreamSaveWord(BitstreamEncVideo *stream);\n    PV_STATUS BitstreamSavePartial(BitstreamEncVideo *stream, Int *fraction);\n    Int BitstreamGetPos(BitstreamEncVideo *stream);\n    void BitstreamEncReset(BitstreamEncVideo *stream);\n\n    Int BitstreamShortHeaderByteAlignStuffing(BitstreamEncVideo *stream);\n    Int BitstreamMpeg4ByteAlignStuffing(BitstreamEncVideo *stream);\n    PV_STATUS BitstreamAppendEnc(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);\n    PV_STATUS BitstreamAppendPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);\n    PV_STATUS BitstreamAppendPacketNoOffset(BitstreamEncVideo *bitstream1, 
BitstreamEncVideo *bitstream2);\n    PV_STATUS BitstreamRepos(BitstreamEncVideo *bitstream, Int byteCount, Int bitCount);\n    PV_STATUS BitstreamFlushBits(BitstreamEncVideo *bitstream1, Int num_bit_left);\n    PV_STATUS BitstreamPrependPacket(BitstreamEncVideo *bitstream1, BitstreamEncVideo *bitstream2);\n\n\n    Void  BitstreamSetOverrunBuffer(BitstreamEncVideo *stream, UChar *overrunBuffer, Int oBSize, VideoEncData *video);\n    PV_STATUS BitstreamUseOverrunBuffer(BitstreamEncVideo* stream, Int numExtraBytes);\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* _BITSTREAM_IO_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 2009 OrangeLabs\n *\n * Author: Alexis Gilabert Senar\n * Date: 2009-07-01\n * -------------------------------------------------------------------\n */\n#define LOG_TAG \"NativeEnc\"\n#include \"com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.h\"\n#include <stdio.h>\n#include \"mp4enc_api.h\"\n\n\n    VideoEncControls iEncoderControl;\n    VideoEncOptions aEncOption;\n    int iSrcHeight;\n    int iSrcWidth;\n    unsigned long long NextTimestamp;\n    uint8* aOutBuffer;\n    uint8* YUV;\n    uint8* yuvPtr;\n    ULong modTime;\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    InitEncoder\n * Signature: (Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/h263/encoder/EncOptions;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_InitEncoder\n  (JNIEnv *env, jclass iclass, jobject params)\n{\n    /**\n      * Clean encoder\n      */\n    memset((void*) &iEncoderControl, 0, sizeof(VideoEncControls));\n    PVCleanUpVideoEncoder(&iEncoderControl);\n\n    /**\n      * Setting encoder options\n      */\n    PVGetDefaultEncOption(&aEncOption, 0);\n\n    jclass objClass = (env)->GetObjectClass(params);\n    if (objClass == NULL) return -2;\n\n    jfieldID encMode = (env)->GetFieldID(objClass,\"encMode\",\"I\");\n    if (encMode == NULL) return -3;\n    aEncOption.encMode= (MP4EncodingMode)(env)->GetIntField(params,encMode);\n    //LOGI(\"[H263Encoder parameters] encMode = %d\",aEncOption.encMode);\n\n    jfieldID packetSize = (env)->GetFieldID(objClass,\"packetSize\",\"I\");\n    if (packetSize == NULL) return -4;\n    aEncOption.packetSize= (env)->GetIntField(params,packetSize);\n    //LOGI(\"[H263Encoder parameters] packetSize = %d\",aEncOption.packetSize);\n\n    jfieldID profile_level = 
(env)->GetFieldID(objClass,\"profile_level\",\"I\");\n    if (profile_level == NULL) return -5;\n    aEncOption.profile_level= (ProfileLevelType)(env)->GetIntField(params,profile_level);\n    //LOGI(\"[H263Encoder parameters] profile_level = %d\",aEncOption.profile_level);\n\n    jfieldID rvlcEnable = (env)->GetFieldID(objClass,\"rvlcEnable\",\"Z\");\n    if (rvlcEnable == NULL) return -6;\n    aEncOption.rvlcEnable = ((env)->GetBooleanField(params,rvlcEnable) == true)? (ParamEncMode)1:(ParamEncMode)0;\n    //LOGI(\"[H263Encoder parameters] rvlcEnable = %d\",aEncOption.rvlcEnable);\n\n    jfieldID gobHeaderInterval = (env)->GetFieldID(objClass,\"gobHeaderInterval\",\"I\");\n    if (gobHeaderInterval == NULL) return -7;\n    aEncOption.gobHeaderInterval = (env)->GetIntField(params,gobHeaderInterval);\n    //LOGI(\"[H263Encoder parameters] gobHeaderInterval = %d\",aEncOption.gobHeaderInterval);\n\n    jfieldID numLayers = (env)->GetFieldID(objClass,\"numLayers\",\"I\");\n    if (numLayers == NULL) return -8;\n    aEncOption.numLayers= (env)->GetIntField(params,numLayers);\n    //LOGI(\"[H263Encoder parameters] numLayers = %d\",aEncOption.numLayers);\n\n    jfieldID timeIncRes = (env)->GetFieldID(objClass,\"timeIncRes\",\"I\");\n    if (timeIncRes == NULL) return -9;\n    aEncOption.timeIncRes = (env)->GetIntField(params,timeIncRes);\n    //LOGI(\"[H263Encoder parameters] timeIncRes = %d\",aEncOption.timeIncRes);\n\n    jfieldID tickPerSrc = (env)->GetFieldID(objClass,\"tickPerSrc\",\"I\");\n    if (tickPerSrc == NULL) return -10;\n    aEncOption.tickPerSrc= (env)->GetIntField(params,tickPerSrc);\n    //LOGI(\"[H263Encoder parameters] tickPerSrc = %d\",aEncOption.tickPerSrc);\n\n    jfieldID encHeight = (env)->GetFieldID(objClass,\"encHeight\",\"I\");\n    if (encHeight == NULL) return -11;\n    aEncOption.encHeight[0] = aEncOption.encHeight[1] = (env)->GetIntField(params,encHeight);\n    //LOGI(\"[H263Encoder parameters] encHeight = %d\",aEncOption.encHeight[0]);\n\n 
   jfieldID encWidth = (env)->GetFieldID(objClass,\"encWidth\",\"I\");\n    if (encWidth == NULL) return -12;\n    aEncOption.encWidth[0] = aEncOption.encWidth[1] = (env)->GetIntField(params,encWidth);\n    //LOGI(\"[H263Encoder parameters] encWidth = %d\",aEncOption.encWidth[0]);\n\n    jfieldID encFrameRate = (env)->GetFieldID(objClass,\"encFrameRate\",\"F\");\n    if (encFrameRate == NULL) return -13;\n    aEncOption.encFrameRate[0] = aEncOption.encFrameRate[1] = (env)->GetFloatField(params,encFrameRate);\n    //LOGI(\"[H263Encoder parameters] encFrameRate = %f\",aEncOption.encFrameRate[0]);\n\n    jfieldID bitRate = (env)->GetFieldID(objClass,\"bitRate\",\"I\");\n    if (bitRate == NULL) return -14;\n    aEncOption.bitRate[0] = aEncOption.bitRate[1] = (env)->GetIntField(params,bitRate);\n    //LOGI(\"[H263Encoder parameters] bitRate = %d\",aEncOption.bitRate[0]);\n\n    jfieldID iQuant = (env)->GetFieldID(objClass,\"iQuant\",\"I\");\n    if (iQuant == NULL) return -15;\n    aEncOption.iQuant[0] = aEncOption.iQuant[1] = (env)->GetIntField(params,iQuant);\n    //LOGI(\"[H263Encoder parameters] iQuant = %d\",aEncOption.iQuant[0]);\n\n    jfieldID pQuant = (env)->GetFieldID(objClass,\"pQuant\",\"I\");\n    if (pQuant == NULL) return -16;\n    aEncOption.pQuant[0] = aEncOption.pQuant[1] = (env)->GetIntField(params,pQuant);\n    //LOGI(\"[H263Encoder parameters] pQuant = %d\",aEncOption.pQuant[0]);\n\n    jfieldID quantType = (env)->GetFieldID(objClass,\"quantType\",\"I\");\n    if (quantType == NULL) return -17;\n    aEncOption.quantType[0] = aEncOption.quantType[1] = (env)->GetIntField(params,quantType);\n    //LOGI(\"[H263Encoder parameters] quantType = %d\",aEncOption.quantType[0]);\n\n    jfieldID rcType = (env)->GetFieldID(objClass,\"rcType\",\"I\");\n    if (rcType == NULL) return -18;\n    aEncOption.rcType = (MP4RateControlType)(env)->GetIntField(params,rcType);\n    //LOGI(\"[H263Encoder parameters] rcType = %d\",aEncOption.rcType);\n\n    jfieldID vbvDelay 
= (env)->GetFieldID(objClass,\"vbvDelay\",\"F\");\n    if (vbvDelay == NULL) return -19;\n    aEncOption.vbvDelay = (env)->GetFloatField(params,vbvDelay);\n    //LOGI(\"[H263Encoder parameters] vbvDelay = %f\",aEncOption.vbvDelay);\n\n    jfieldID noFrameSkipped = (env)->GetFieldID(objClass,\"noFrameSkipped\",\"Z\");\n    if (noFrameSkipped == NULL) return -20;\n    aEncOption.noFrameSkipped = ((env)->GetBooleanField(params,noFrameSkipped) == true)? (ParamEncMode)1:(ParamEncMode)0;\n    //LOGI(\"[H263Encoder parameters] noFrameSkipped = %d\",aEncOption.noFrameSkipped);\n\n    jfieldID intraPeriod = (env)->GetFieldID(objClass,\"intraPeriod\",\"I\");\n    if (intraPeriod == NULL) return -21;\n    aEncOption.intraPeriod = (env)->GetIntField(params,intraPeriod);\n    //LOGI(\"[H263Encoder parameters] intraPeriod = %d\",aEncOption.intraPeriod);\n\n    jfieldID numIntraMB = (env)->GetFieldID(objClass,\"numIntraMB\",\"I\");\n    if (numIntraMB == NULL) return -22;\n    aEncOption.numIntraMB = (env)->GetIntField(params,numIntraMB);\n    //LOGI(\"[H263Encoder parameters] numIntraMB = %d\",aEncOption.numIntraMB);\n\n    jfieldID sceneDetect = (env)->GetFieldID(objClass,\"sceneDetect\",\"Z\");\n    if (sceneDetect == NULL) return -23;\n    aEncOption.sceneDetect = ((env)->GetBooleanField(params,sceneDetect) == true)?(ParamEncMode)1:(ParamEncMode)0;\n    //LOGI(\"[H263Encoder parameters] sceneDetect = %d\",aEncOption.sceneDetect);\n\n    jfieldID searchRange = (env)->GetFieldID(objClass,\"searchRange\",\"I\");\n    if (searchRange == NULL) return -24;\n    aEncOption.searchRange = (env)->GetIntField(params,searchRange);\n    //LOGI(\"[H263Encoder parameters] searchRange = %d\",aEncOption.searchRange);\n\n    jfieldID mv8x8Enable = (env)->GetFieldID(objClass,\"mv8x8Enable\",\"Z\");\n    if (mv8x8Enable == NULL) return -25;\n    aEncOption.mv8x8Enable = ((env)->GetBooleanField(params,mv8x8Enable)==true)?(ParamEncMode)1:(ParamEncMode)0;\n    //LOGI(\"[H263Encoder parameters] 
mv8x8Enable = %d\",aEncOption.mv8x8Enable);\n\n    jfieldID intraDCVlcTh = (env)->GetFieldID(objClass,\"intraDCVlcTh\",\"I\");\n    if (intraDCVlcTh == NULL) return -26;\n    aEncOption.intraDCVlcTh = (env)->GetIntField(params,intraDCVlcTh);\n    //LOGI(\"[H263Encoder parameters] intraDCVlcTh= %d\",aEncOption.intraDCVlcTh);\n\n    jfieldID useACPred = (env)->GetFieldID(objClass,\"useACPred\",\"Z\");\n    if (useACPred == NULL) return -27;\n    aEncOption.useACPred = ((env)->GetBooleanField(params,useACPred)==true)?1:0;\n    //LOGI(\"[H263Encoder parameters] useACPred = %d\",aEncOption.useACPred);\n\n    /**\n      * Init\n      */\n    iSrcWidth = aEncOption.encWidth[0];\n    //LOGI(\"[H263Encoder parameters] iSrcWidth = %d\",iSrcWidth);\n    iSrcHeight = aEncOption.encHeight[0];\n    //LOGI(\"[H263Encoder parameters] iSrcHeight = %d\",iSrcHeight);\n    modTime = 0;\n\n    /**\n      * Init ptr for encode method\n      */\n    YUV =  (uint8*)malloc((iSrcWidth*iSrcHeight*3/2));\n    if (YUV == NULL){\n      return -1;\n    }\n    yuvPtr = YUV;\n\n    aOutBuffer = (uint8*)malloc((iSrcWidth*iSrcHeight*3/2));\n    if (aOutBuffer == NULL){\n      return -1;\n    }\n\n    /**\n      * Init encoder\n      */\n    return PVInitVideoEncoder(&iEncoderControl, &aEncOption);\n\n}\n\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    EncodeFrame\n * Signature: ([BIJ)[B\n */\nJNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_EncodeFrame\n  (JNIEnv *env, jclass iclass, jbyteArray frame, jlong timestamp)\n{\n    VideoEncFrameIO vid_in, vid_out;\n    int Size = 0;\n    uint status;\n    int nLayer = 0;\n    jbyteArray result ;\n\n    /**\n      * Init for EncodeFrame\n      */\n    jint len = env->GetArrayLength(frame);\n    // Read the byte array\n    uint8* data = (uint8*)malloc(len);\n    uint8* whereToFree = data;\n    env->GetByteArrayRegion 
(frame, (jint)0, (jint)len, (jbyte*)data);\n\n    // Convert YUV input to have distinct Y U and V channels\n    // Copy Y data\n    yuvPtr = YUV;\n    for (int i=0;i<iSrcHeight;i++){\n      for (int j=0;j<iSrcWidth;j++){\n    \t  *yuvPtr= *data;\n    \t  yuvPtr++;\n    \t  data++;\n      }\n    }\n\n    // Copy UV data\n    uint8 *uPos = YUV + (iSrcWidth*iSrcHeight);\n    uint8 *vPos = YUV + (iSrcWidth*iSrcHeight) + ((iSrcWidth*iSrcHeight)>>2);\n    uint16 temp = 0;\n    uint16* iVideoPtr = (uint16*)data;\n    for (int i=0;i<(iSrcHeight>>1);i++){\n\tfor (int j=0;j<(iSrcWidth>>1);j++){\n\t\ttemp = *iVideoPtr++; // U1V1\n\t\t*vPos++= (uint8)(temp & 0xFF);\n\t\t*uPos++= (uint8)((temp >> 8) & 0xFF);\n\t}\n    }\n\n    vid_in.height = iSrcHeight;\n    vid_in.pitch = iSrcWidth;\n    vid_in.timestamp = (ULong)(timestamp & 0xFFFFFFFF);\n    vid_in.yChan = YUV;\n    vid_in.uChan = (YUV + vid_in.height * vid_in.pitch);\n    vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >> 2);\n    Size = len;\n\n    // encode the frame\n    status = PVEncodeVideoFrame(&iEncoderControl, &vid_in, &vid_out, &modTime, (UChar*)aOutBuffer, &Size, &nLayer);\n    if (status != 1) return (env)->NewByteArray(1);\n\n    // Copy aOutBuffer into result\n    result=(env)->NewByteArray(Size);\n    (env)->SetByteArrayRegion(result, 0, Size, (jbyte*)aOutBuffer);\n    free(whereToFree);\n    // Return\n    return result;\n\n}\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    DeinitEncoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_DeinitEncoder\n  (JNIEnv *env, jclass clazz){\n    return PVCleanUpVideoEncoder(&iEncoderControl);\n}\n\n/*\n * This is called by the VM when the shared library is first loaded.\n */\njint JNI_OnLoad(JavaVM* vm, void* reserved) {\n    JNIEnv* env = NULL;\n    jint result = -1;\n\n    if 
(vm->GetEnv((void**) &env, JNI_VERSION_1_4) != JNI_OK) {\n        goto bail;\n    }\n\n    /* success -- return valid version number */\n    result = JNI_VERSION_1_4;\n\nbail:\n    return result;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder.h",
    "content": "/* DO NOT EDIT THIS FILE - it is machine generated */\n#include <jni.h>\n/* Header for class com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder */\n\n#ifndef _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n#define _Included_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    InitEncoder\n * Signature: (Lcom/orangelabs/rcs/core/ims/protocol/rtp/codec/h263/encoder/EncOptions;)I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_InitEncoder\n  (JNIEnv *, jclass, jobject);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    EncodeFrame\n * Signature: ([BJ)[B\n */\nJNIEXPORT jbyteArray JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_EncodeFrame\n  (JNIEnv *, jclass, jbyteArray, jlong);\n\n/*\n * Class:     com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder\n * Method:    DeinitEncoder\n * Signature: ()I\n */\nJNIEXPORT jint JNICALL Java_com_orangelabs_rcs_core_ims_protocol_rtp_codec_video_h263_encoder_NativeH263Encoder_DeinitEncoder\n  (JNIEnv *, jclass);\n\n#ifdef __cplusplus\n}\n#endif\n#endif\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/combined_encode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"bitstream_io.h\"\n#include \"vlc_encode.h\"\n#include \"m4venc_oscl.h\"\n\nPV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream);\n\n/* ======================================================================== */\n/*  Function : EncodeFrameCombinedMode()                                    */\n/*  Date     : 09/01/2000                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode a frame of MPEG4 bitstream in Combined mode.          
*/\n/*  In/out   :                                                              */\n/*  Return   :  PV_SUCCESS if successful else PV_FAIL                       */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nPV_STATUS EncodeFrameCombinedMode(VideoEncData *video)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    VideoEncParams *encParams = video->encParams;\n    Int width = currVop->width; /* has to be Vop, for multiple of 16 */\n    Int lx = currVop->pitch; /* with padding */\n    Int offset = 0;\n    Int ind_x, ind_y;\n    Int start_packet_header = 0;\n    UChar *QPMB = video->QPMB;\n    Int QP;\n    Int mbnum = 0, slice_counter = 0, curr_slice_counter = 0;\n    Int num_bits, packet_size = encParams->ResyncPacketsize;\n    Int GOB_Header_Interval = encParams->GOB_Header_Interval;\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    Int numHeaderBits;\n    approxDCT fastDCTfunction;\n    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */\n    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);\n    void (*MBVlcEncode)(VideoEncData*, Int[], void *);\n    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);\n\n    /* for H263 GOB changes */\n//MP4RateControlType rc_type = encParams->RC_Type;\n\n    video->QP_prev = currVop->quantizer;\n\n    numHeaderBits = BitstreamGetPos(bs1);\n\n    /* determine type of quantization   */\n#ifndef NO_MPEG_QUANT\n    if (currVol->quantType == 0)\n        CodeMB = &CodeMB_H263;\n    else\n        CodeMB = &CodeMB_MPEG;\n#else\n    CodeMB = &CodeMB_H263;\n#endif\n\n    /* determine which functions to be used, in MB-level */\n    if (currVop->predictionType == P_VOP)\n        MBVlcEncode = 
&MBVlcEncodeCombined_P_VOP;\n    else if (currVop->predictionType == I_VOP)\n        MBVlcEncode = &MBVlcEncodeCombined_I_VOP;\n    else /* B_VOP not implemented yet */\n        return PV_FAIL;\n\n    /* determine which VLC table to be used */\n#ifndef H263_ONLY\n    if (currVol->shortVideoHeader)\n        BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;\n#ifndef NO_RVLC\n    else if (currVol->useReverseVLC)\n        BlockCodeCoeff = &BlockCodeCoeff_RVLC;\n#endif\n    else\n        BlockCodeCoeff = &BlockCodeCoeff_Normal;\n#else\n    BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;\n#endif\n\n    /* gob_frame_id is the same for different vop types - the reason should be SCD */\n    if (currVol->shortVideoHeader && currVop->gobFrameID != currVop->predictionType)\n        currVop->gobFrameID = currVop->predictionType;\n\n\n    video->usePrevQP = 0;\n\n    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)  /* Col MB Loop */\n    {\n\n        video->outputMB->mb_y = ind_y; /*  5/28/01 */\n\n        if (currVol->shortVideoHeader)  /* ShortVideoHeader Mode */\n        {\n\n            if (slice_counter && GOB_Header_Interval && (ind_y % GOB_Header_Interval == 0))     /* Encode GOB Header */\n            {\n                QP = QPMB[mbnum];    /* Get quant_scale */\n                video->header_bits -= BitstreamGetPos(currVol->stream); /* Header Bits */\n                status = EncodeGOBHeader(video, slice_counter, QP, 0);  //ind_y     /* Encode GOB Header */\n                video->header_bits += BitstreamGetPos(currVol->stream); /* Header Bits */\n                curr_slice_counter = slice_counter;\n            }\n        }\n\n        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */\n        {\n            video->outputMB->mb_x = ind_x; /*  5/28/01 */\n            video->mbnum = mbnum;\n            QP = QPMB[mbnum];   /* always read new QP */\n\n            if (GOB_Header_Interval)\n                video->sliceNo[mbnum] = curr_slice_counter; /* 
Update MB slice number */\n            else\n                video->sliceNo[mbnum] = slice_counter;\n\n            /****************************************************************************************/\n            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */\n            /****************************************************************************************/\n            getMotionCompensatedMB(video, ind_x, ind_y, offset);\n\n#ifndef H263_ONLY\n            if (start_packet_header)\n            {\n                slice_counter++;                        /* Increment slice counter */\n                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/\n                video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n                video->QP_prev = currVop->quantizer;\n                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);\n                video->header_bits += BitstreamGetPos(bs1); /* Header Bits */\n                numHeaderBits = BitstreamGetPos(bs1);\n                start_packet_header = 0;\n                video->usePrevQP = 0;\n            }\n#endif\n            /***********************************************/\n            /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion Comp */\n            /***********************************************/\n\n            status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);\n\n            /************************************/\n            /* MB VLC Encode: VLC Encode MB     */\n            /************************************/\n\n            (*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff);\n\n            /*************************************************************/\n            /* Assemble Packets:  Assemble the MB VLC codes into Packets */\n            /*************************************************************/\n\n            /* Assemble_Packet(video) */\n#ifndef H263_ONLY\n            if 
(!currVol->shortVideoHeader) /* Not in ShortVideoHeader mode */\n            {\n                if (!currVol->ResyncMarkerDisable) /* RESYNC MARKER MODE */\n                {\n                    num_bits = BitstreamGetPos(bs1) - numHeaderBits;\n                    if (num_bits > packet_size)\n                    {\n                        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n\n                        status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */\n                        /* continue even if status == PV_END_OF_BUF, to get the stats */\n\n                        BitstreamEncReset(bs1);\n\n                        start_packet_header = 1;\n                    }\n                }\n                else   /* NO RESYNC MARKER MODE */\n                {\n                    status = BitstreamAppendEnc(currVol->stream, bs1); /* Initialize to 0 */\n                    /* continue even if status == PV_END_OF_BUF, to get the stats */\n\n                    BitstreamEncReset(bs1);\n                }\n            }\n            else\n#endif /* H263_ONLY */\n            {   /* ShortVideoHeader Mode */\n                status = BitstreamAppendEnc(currVol->stream, bs1);  /* Initialize to 0 */\n                /* continue even if status == PV_END_OF_BUF, to get the stats */\n\n                BitstreamEncReset(bs1);\n            }\n            mbnum++;\n            offset += 16;\n        } /* End of For ind_x */\n\n        offset += (lx << 4) - width;\n        if (currVol->shortVideoHeader)  /* ShortVideoHeader = 1 */\n        {\n\n            if (GOB_Header_Interval)  slice_counter++;\n        }\n\n    } /* End of For ind_y */\n\n    if (currVol->shortVideoHeader) /* ShortVideoHeader = 1 */\n    {\n\n        video->header_bits += BitstreamShortHeaderByteAlignStuffing(currVol->stream); /* Byte Align */\n    }\n#ifndef H263_ONLY\n    else   /* Combined Mode*/\n    {\n        if 
(!currVol->ResyncMarkerDisable) /* Resync Markers */\n        {\n\n            if (!start_packet_header)\n            {\n                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align  */\n\n                status = BitstreamAppendPacket(currVol->stream, bs1);   /* Put Packet to Buffer */\n                /* continue even if status == PV_END_OF_BUF, to get the stats */\n\n                BitstreamEncReset(bs1);\n            }\n        }\n        else   /* No Resync Markers */\n        {\n            video->header_bits += BitstreamMpeg4ByteAlignStuffing(currVol->stream); /* Byte Align */\n        }\n    }\n#endif /* H263_ONLY */\n\n    return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */\n}\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : EncodeSliceCombinedMode()                                    */\n/*  Date     : 04/19/2002                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode a slice of MPEG4 bitstream in Combined mode and save  */\n/*              the current MB to continue next time it is called.          
*/\n/*  In/out   :                                                              */\n/*  Return   :  PV_SUCCESS if successful else PV_FAIL                       */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nPV_STATUS EncodeSliceCombinedMode(VideoEncData *video)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    UChar mode = MODE_INTRA;\n    UChar *Mode = video->headerInfo.Mode;\n    VideoEncParams *encParams = video->encParams;\n    Int nTotalMB = currVol->nTotalMB;\n    Int width = currVop->width; /* has to be Vop, for multiple of 16 */\n    Int lx = currVop->pitch; /* , with padding */\n//  rateControl *rc = encParams->rc[video->currLayer];\n    UChar *QPMB = video->QPMB;\n    Int QP;\n    Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y;\n    Int offset = video->offset;                 /* get current MB location */\n    Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */\n    Int firstMB = mbnum;\n    Int start_packet_header = 0;\n    Int num_bits = 0;\n    Int packet_size = encParams->ResyncPacketsize - 1;\n    Int resync_marker = ((!currVol->shortVideoHeader) && (!currVol->ResyncMarkerDisable));\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    Int byteCount = 0, byteCount1 = 0, bitCount = 0;\n    Int numHeaderBits = 0;\n    approxDCT fastDCTfunction;\n    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */\n    UChar CBP = 0;\n    Short outputMB[6][64];\n    Int k;\n    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);\n    void (*MBVlcEncode)(VideoEncData*, Int[], void *);\n    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);\n\n    video->QP_prev = 
31;\n\n#define H263_GOB_CHANGES\n\n\n    if (video->end_of_buf) /* left-over from previous run */\n    {\n        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n        if (status != PV_END_OF_BUF)\n        {\n            BitstreamEncReset(bs1);\n            video->end_of_buf = 0;\n        }\n        return status;\n    }\n\n\n    if (mbnum == 0) /* only do this at the start of a frame */\n    {\n        QPMB[0] = video->QP_prev = QP = currVop->quantizer;\n        video->usePrevQP = 0;\n\n        numHeaderBits = BitstreamGetPos(bs1);\n    }\n\n    /* Re-assign fast functions on every slice, don't have to put it in the memory */\n    QP = QPMB[mbnum];\n    if (mbnum > 0)   video->QP_prev = QPMB[mbnum-1];\n\n    /* determine type of quantization   */\n#ifndef NO_MPEG_QUANT\n    if (currVol->quantType == 0)\n        CodeMB = &CodeMB_H263;\n    else\n        CodeMB = &CodeMB_MPEG;\n#else\n    CodeMB = &CodeMB_H263;\n#endif\n\n    /* determine which functions to be used, in MB-level */\n    if (currVop->predictionType == P_VOP)\n        MBVlcEncode = &MBVlcEncodeCombined_P_VOP;\n    else if (currVop->predictionType == I_VOP)\n        MBVlcEncode = &MBVlcEncodeCombined_I_VOP;\n    else /* B_VOP not implemented yet */\n        return PV_FAIL;\n\n    /* determine which VLC table to be used */\n#ifndef H263_ONLY\n    if (currVol->shortVideoHeader)\n        BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;\n#ifndef NO_RVLC\n    else if (currVol->useReverseVLC)\n        BlockCodeCoeff = &BlockCodeCoeff_RVLC;\n#endif\n    else\n        BlockCodeCoeff = &BlockCodeCoeff_Normal;\n#else\n    BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;\n#endif\n\n    /*  (gob_frame_id is the same for different vop types) The reason should be SCD */\n    if (currVol->shortVideoHeader && currVop->gobFrameID != currVop->predictionType)\n        currVop->gobFrameID = currVop->predictionType;\n\n\n    if (mbnum != 0)\n    {\n        if (currVol->shortVideoHeader)\n        {\n            /* 
Encode GOB Header */\n            bitCount = BitstreamGetPos(bs1);\n            byteCount1 = byteCount = bitCount >> 3; /* save the position before GOB header */\n            bitCount = bitCount & 0x7;\n\n#ifdef H263_GOB_CHANGES\n            video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n            status = EncodeGOBHeader(video, slice_counter, QP, 1);  //ind_y    /* Encode GOB Header */\n            video->header_bits += BitstreamGetPos(bs1); /* Header Bits */\n#endif\n            goto JUMP_IN_SH;\n        }\n        else if (currVol->ResyncMarkerDisable)\n        {\n            goto JUMP_IN_SH;\n        }\n        else\n        {\n            start_packet_header = 1;\n            goto JUMP_IN;\n        }\n    }\n\n    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)  /* Col MB Loop */\n    {\n\n        video->outputMB->mb_y = ind_y; /*  5/28/01, do not remove */\n\n        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */\n        {\n\n            video->outputMB->mb_x = ind_x; /*  5/28/01, do not remove */\n            video->mbnum = mbnum;\n            video->sliceNo[mbnum] = slice_counter;      /* Update MB slice number */\nJUMP_IN_SH:\n            /****************************************************************************************/\n            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */\n            /****************************************************************************************/\n            getMotionCompensatedMB(video, ind_x, ind_y, offset);\n\nJUMP_IN:\n            QP = QPMB[mbnum];   /* always read new QP */\n#ifndef H263_ONLY\n            if (start_packet_header)\n            {\n                slice_counter++;                        /* Increment slice counter */\n                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/\n                video->QP_prev = currVop->quantizer;                        /* store QP */\n                
num_bits = BitstreamGetPos(bs1);\n                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 1);\n                numHeaderBits = BitstreamGetPos(bs1) - num_bits;\n                video->header_bits += numHeaderBits; /* Header Bits */\n                start_packet_header = 0;\n                video->usePrevQP = 0;\n            }\n            else  /* don't encode the first MB in packet again */\n#endif /* H263_ONLY */\n            {\n                /***********************************************/\n                /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion Comp */\n                /***********************************************/\n                status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);\n            }\n\n            /************************************/\n            /* MB VLC Encode: VLC Encode MB     */\n            /************************************/\n\n            /* save the state before VLC encoding */\n            if (resync_marker)\n            {\n                bitCount = BitstreamGetPos(bs1);\n                byteCount = bitCount >> 3; /* save the state before encoding */\n                bitCount = bitCount & 0x7;\n                mode = Mode[mbnum];\n                CBP = video->headerInfo.CBP[mbnum];\n                for (k = 0; k < 6; k++)\n                {\n                    M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6);\n                }\n            }\n            /*************************************/\n\n            (*MBVlcEncode)(video, ncoefblck, (void*)BlockCodeCoeff);\n\n            /*************************************************************/\n            /* Assemble Packets:  Assemble the MB VLC codes into Packets */\n            /*************************************************************/\n\n            /* Assemble_Packet(video) */\n#ifndef H263_ONLY\n            if (!currVol->shortVideoHeader)\n            {\n                if 
(!currVol->ResyncMarkerDisable)\n                {\n                    /* Not in ShortVideoHeader mode and RESYNC MARKER MODE */\n\n                    num_bits = BitstreamGetPos(bs1) ;//- numHeaderBits; // include header\n\n                    /* Assemble packet and return when size reached */\n                    if (num_bits > packet_size && mbnum != firstMB)\n                    {\n\n                        BitstreamRepos(bs1, byteCount, bitCount); /* rewind one MB */\n\n                        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n\n                        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Put Packet to Buffer */\n\n                        if (status == PV_END_OF_BUF)\n                        {\n                            video->end_of_buf = 1;\n                        }\n                        else\n                        {\n                            BitstreamEncReset(bs1);\n                        }\n\n                        start_packet_header = 1;\n\n                        if (mbnum < nTotalMB || video->end_of_buf) /* return here */\n                        {\n                            video->mbnum = mbnum;\n                            video->sliceNo[mbnum] = slice_counter;\n                            video->offset = offset;\n                            Mode[mbnum] = mode;\n                            video->headerInfo.CBP[mbnum] = CBP;\n\n                            for (k = 0; k < 6; k++)\n                            {\n                                M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6);\n                            }\n\n                            return status;\n                        }\n                    }\n                }\n                else  /* NO RESYNC MARKER , return when buffer is full*/\n                {\n\n                    if (mbnum < nTotalMB - 1 && currVol->stream->byteCount + bs1->byteCount + 1 >= 
currVol->stream->bufferSize)\n                    {\n                        /* find maximum bytes to fit in the buffer */\n                        byteCount = currVol->stream->bufferSize - currVol->stream->byteCount - 1;\n\n                        num_bits = BitstreamGetPos(bs1) - (byteCount << 3);\n                        BitstreamRepos(bs1, byteCount, 0);\n                        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n                        BitstreamFlushBits(bs1, num_bits);\n\n                        /* move on to next MB */\n                        mbnum++ ;\n                        offset += 16;\n                        video->outputMB->mb_x++;\n                        if (video->outputMB->mb_x >= currVol->nMBPerRow)\n                        {\n                            video->outputMB->mb_x = 0;\n                            video->outputMB->mb_y++;\n                            offset += (lx << 4) - width;\n                        }\n                        video->mbnum = mbnum;\n                        video->offset = offset;\n                        video->sliceNo[mbnum] = slice_counter;\n                        return status;\n                    }\n                }\n            }\n#endif /* H263_ONLY */\n            offset += 16;\n            mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */\n\n        } /* End of For ind_x */\n\n        offset += (lx << 4) - width;\n\n        if (currVol->shortVideoHeader)  /* ShortVideoHeader = 1 */\n        {\n#ifdef H263_GOB_CHANGES\n            slice_counter++;\n            video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1);\n#endif\n            //video->header_bits+=BitstreamShortHeaderByteAlignStuffing(bs1);\n\n            /* check if time to packetize */\n            if (currVol->stream->byteCount + bs1->byteCount > currVol->stream->bufferSize)\n            {\n                if (byteCount == byteCount1) /* a single GOB bigger than packet size */\n        
        {\n                    status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n                    status = PV_END_OF_BUF;\n                    video->end_of_buf = 1;\n                    start_packet_header = 1;\n                }\n                else    /* for short_header scooch back to previous GOB */\n                {\n                    num_bits = ((bs1->byteCount - byteCount) << 3);\n                    //num_bits = ((bs1->byteCount<<3) + bs1->bitCount) - ((byteCount<<3) + bitCount);\n                    BitstreamRepos(bs1, byteCount, 0);\n                    //BitstreamRepos(bs1,byteCount,bitCount);\n//                  k = currVol->stream->byteCount; /* save state before appending */\n                    status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n                    BitstreamFlushBits(bs1, num_bits);\n//                  if(mbnum == nTotalMB || k + bs1->byteCount >= currVol->stream->bufferSize){\n                    /* last GOB or current one with larger size will be returned next run */\n//                      status = PV_END_OF_BUF;\n//                      video->end_of_buf = 1;\n//                  }\n                    start_packet_header = 1;\n                    if (mbnum == nTotalMB) /* there's one more GOB to packetize for the next round */\n                    {\n                        status = PV_END_OF_BUF;\n                        video->end_of_buf = 1;\n                    }\n                }\n\n                if (mbnum < nTotalMB) /* return here */\n                {\n                    /* move on to next MB */\n                    video->outputMB->mb_x = 0;\n                    video->outputMB->mb_y++;\n                    video->mbnum = mbnum;\n                    video->offset = offset;\n                    video->sliceNo[mbnum] = slice_counter;\n                    return status;\n                }\n            }\n            else if (mbnum < nTotalMB) /* do not write GOB header if end of vop 
*/\n            {\n                bitCount = BitstreamGetPos(bs1);\n                byteCount = bitCount >> 3;  /* save the position before GOB header */\n                bitCount = bitCount & 0x7;\n#ifdef H263_GOB_CHANGES\n                video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n                status = EncodeGOBHeader(video, slice_counter, QP, 1);         /* Encode GOB Header */\n                video->header_bits += BitstreamGetPos(bs1); /* Header Bits */\n#endif\n            }\n        }\n\n    } /* End of For ind_y */\n#ifndef H263_ONLY\n    if (!currVol->shortVideoHeader) /* Combined Mode*/\n    {\n        if (!currVol->ResyncMarkerDisable) /* Resync Markers */\n        {\n\n            if (!start_packet_header)\n            {\n\n                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1);/* Byte Align  */\n\n                status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);   /* Put Packet to Buffer */\n                if (status == PV_END_OF_BUF)\n                {\n                    video->end_of_buf = 1;\n                }\n                else\n                {\n                    BitstreamEncReset(bs1);\n                }\n            }\n        }\n        else   /* No Resync Markers */\n        {\n            video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte Align */\n            status = BitstreamAppendPacketNoOffset(currVol->stream, bs1); /* Initialize to 0 */\n            if (status == PV_END_OF_BUF)\n            {\n                video->end_of_buf = 1;\n            }\n            else\n            {\n                BitstreamEncReset(bs1);\n            }\n        }\n    }\n    else\n#endif /* H263_ONLY */\n    {\n        if (!start_packet_header) /* not yet packetized */\n        {\n            video->header_bits += BitstreamShortHeaderByteAlignStuffing(bs1);\n            status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n            if (status == PV_END_OF_BUF)\n   
         {\n                video->end_of_buf = 1;\n            }\n            else\n            {\n                BitstreamEncReset(bs1);\n                video->end_of_buf = 0;\n            }\n        }\n    }\n\n    video->mbnum = mbnum;\n    if (mbnum < nTotalMB)\n        video->sliceNo[mbnum] = slice_counter;\n    video->offset = offset;\n\n    return status;\n}\n#endif  /* NO_SLICE_ENCODE */\n\n/* ======================================================================== */\n/*  Function : EncodeGOBHeader()                                            */\n/*  Date     : 09/05/2000                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode a frame of MPEG4 bitstream in Combined mode.          */\n/*  In/out   :                                                              */\n/*  Return   :  PV_SUCCESS if successful else PV_FAIL                       */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nPV_STATUS EncodeGOBHeader(VideoEncData *video, Int GOB_number, Int quant_scale, Int bs1stream)\n{\n    PV_STATUS status = PV_SUCCESS;\n    BitstreamEncVideo *stream = (bs1stream ? video->bitstream1 : video->vol[video->currLayer]->stream);\n\n    status = BitstreamPutGT16Bits(stream, 17, GOB_RESYNC_MARKER); /* gob_resync_marker */\n    status = BitstreamPutBits(stream, 5, GOB_number);           /* Current gob_number */\n    status = BitstreamPutBits(stream, 2, video->currVop->gobFrameID); /* gob_frame_id */\n    status = BitstreamPutBits(stream, 5, quant_scale);              /* quant_scale */\n    return status;\n}\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/datapart_encode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef H263_ONLY\n\n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n#include \"bitstream_io.h\"\n#include \"mp4enc_lib.h\"\n#include \"m4venc_oscl.h\"\n\n/* ======================================================================== */\n/*  Function : EncodeFrameDataPartMode()                                    */\n/*  Date     : 09/6/2000                                                    */\n/*  History  :                                                              */\n/*  Purpose  : Encode a frame of MPEG4 bitstream in datapartitioning mode.  
*/\n/*  In/out   :                                                              */\n/*  Return   :  PV_SUCCESS if successful else PV_FAIL                       */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nPV_STATUS EncodeFrameDataPartMode(VideoEncData *video)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    VideoEncParams *encParams = video->encParams;\n    Int width = currVop->width; /* has to be Vop, for multiple of 16 */\n    Int lx = currVop->pitch; /*  with padding */\n    Int offset = 0;\n    Int ind_x, ind_y;\n    Int start_packet_header = 0;\n    UChar *QPMB = video->QPMB;\n    Int QP;\n    Int mbnum = 0, slice_counter = 0;\n    Int num_bits, packet_size = encParams->ResyncPacketsize;\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    BitstreamEncVideo *bs2 = video->bitstream2;\n    BitstreamEncVideo *bs3 = video->bitstream3;\n    Int numHeaderBits;\n    approxDCT fastDCTfunction;\n    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */\n    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);\n    void (*MBVlcEncode)(VideoEncData*, Int[], void *);\n    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);\n\n    video->QP_prev = currVop->quantizer;\n\n    numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */\n\n    /* determine type of quantization   */\n#ifndef NO_MPEG_QUANT\n    if (currVol->quantType == 0)\n        CodeMB = &CodeMB_H263;\n    else\n        CodeMB = &CodeMB_MPEG;\n#else\n    CodeMB = &CodeMB_H263;\n#endif\n\n    /* determine which functions to be used, in MB-level */\n    if (currVop->predictionType == P_VOP)\n        MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;\n    else if 
(currVop->predictionType == I_VOP)\n        MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;\n    else /* B_VOP not implemented yet */\n        return PV_FAIL;\n\n    /* determine which VLC table to be used */\n    if (currVol->shortVideoHeader)\n        BlockCodeCoeff = &BlockCodeCoeff_ShortHeader;\n#ifndef NO_RVLC\n    else if (currVol->useReverseVLC)\n        BlockCodeCoeff = &BlockCodeCoeff_RVLC;\n#endif\n    else\n        BlockCodeCoeff = &BlockCodeCoeff_Normal;\n\n    video->usePrevQP = 0;\n\n    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)  /* Col MB Loop */\n    {\n\n        video->outputMB->mb_y = ind_y; /*  5/28/01 */\n\n        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */\n        {\n            video->outputMB->mb_x = ind_x; /*  5/28/01 */\n            video->mbnum = mbnum;\n            video->sliceNo[mbnum] = slice_counter;      /* Update MB slice number */\n            QP = QPMB[mbnum];   /* always read new QP */\n\n            /****************************************************************************************/\n            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */\n            /****************************************************************************************/\n\n            getMotionCompensatedMB(video, ind_x, ind_y, offset);\n\n            if (start_packet_header)\n            {\n                slice_counter++;                        /* Increment slice counter */\n                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/\n                video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n                video->QP_prev = currVop->quantizer;                        /* store QP */\n                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);\n                video->header_bits += BitstreamGetPos(bs1); /* Header Bits */\n                numHeaderBits = BitstreamGetPos(bs1);\n                start_packet_header = 
0;\n                video->usePrevQP = 0;\n            }\n\n            /***********************************************/\n            /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion Comp */\n            /***********************************************/\n\n            status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);\n\n            /************************************/\n            /* MB VLC Encode: VLC Encode MB     */\n            /************************************/\n\n            MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);\n\n            /*************************************************************/\n            /* Assemble Packets:  Assemble the MB VLC codes into Packets */\n            /*************************************************************/\n\n            /* INCLUDE VOP HEADER IN COUNT */\n\n            num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2) +\n                       BitstreamGetPos(bs3) - numHeaderBits;\n\n            /* Assemble_Packet(video) */\n\n            if (num_bits > packet_size)\n            {\n                if (video->currVop->predictionType == I_VOP)\n                    BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */\n                else\n                    BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/\n                BitstreamAppendEnc(bs1, bs2);   /* Combine bs1 and bs2 */\n                BitstreamAppendEnc(bs1, bs3);   /* Combine bs1 and bs3 */\n                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n\n                status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */\n                /* continue even if status == PV_END_OF_BUF, to get the stats */\n\n                BitstreamEncReset(bs1); /* Initialize to 0 */\n                BitstreamEncReset(bs2);\n                BitstreamEncReset(bs3);\n                start_packet_header = 1;\n            }\n            
mbnum++;\n            offset += 16;\n        } /* End of For ind_x */\n\n        offset += (lx << 4) - width;\n    } /* End of For ind_y */\n\n    if (!start_packet_header)\n    {\n        if (video->currVop->predictionType == I_VOP)\n        {\n            BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */\n            video->header_bits += 19;\n        }\n        else\n        {\n            BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /* Add motion_marker */\n            video->header_bits += 17;\n        }\n        BitstreamAppendEnc(bs1, bs2);\n        BitstreamAppendEnc(bs1, bs3);\n        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n        status = BitstreamAppendPacket(currVol->stream, bs1); /* Put Packet to Buffer */\n        /* continue even if status == PV_END_OF_BUF, to get the stats */\n        BitstreamEncReset(bs1); /* Initialize to 0 */\n        BitstreamEncReset(bs2);\n        BitstreamEncReset(bs3);\n    }\n\n    return status; /* if status == PV_END_OF_BUF, this frame will be pre-skipped */\n}\n\n#ifndef  NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : EncodeSliceDataPartMode()                                    */\n/*  Date     : 04/19/2002                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode a slice of MPEG4 bitstream in DataPar mode and save   */\n/*              the current MB to continue next time it is called.          
*/\n/*  In/out   :                                                              */\n/*  Return   :  PV_SUCCESS if successful else PV_FAIL                       */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nPV_STATUS EncodeSliceDataPartMode(VideoEncData *video)\n{\n    PV_STATUS status = PV_SUCCESS;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    UChar mode, *Mode = video->headerInfo.Mode;\n    VideoEncParams *encParams = video->encParams;\n    Int nTotalMB = currVol->nTotalMB;\n    Int width = currVop->width; /* has to be Vop, for multiple of 16 */\n    Int lx = currVop->pitch; /* , with pading */\n    UChar *QPMB = video->QPMB;\n    Int QP;\n    Int ind_x = video->outputMB->mb_x, ind_y = video->outputMB->mb_y;\n    Int offset = video->offset;                 /* get current MB location */\n    Int mbnum = video->mbnum, slice_counter = video->sliceNo[mbnum]; /* get current MB location */\n    Int firstMB = mbnum;\n    Int start_packet_header = (mbnum != 0);\n    Int num_bits = 0;\n    Int packet_size = encParams->ResyncPacketsize - 1 - (currVop->predictionType == I_VOP ? 
19 : 17);\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    BitstreamEncVideo *bs2 = video->bitstream2;\n    BitstreamEncVideo *bs3 = video->bitstream3;\n    Int bitCount1 = 0, bitCount2 = 0, bitCount3 = 0, byteCount1 = 0, byteCount2 = 0, byteCount3 = 0;\n    Int numHeaderBits = 0;\n    approxDCT fastDCTfunction;\n    Int ncoefblck[6] = {64, 64, 64, 64, 64, 64}; /* for FastCodeMB,  5/18/2001 */\n    UChar CBP;\n    Short outputMB[6][64];\n    PV_STATUS(*CodeMB)(VideoEncData *, approxDCT *, Int, Int[]);\n    void (*MBVlcEncode)(VideoEncData*, Int[], void *);\n    void (*BlockCodeCoeff)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar);\n    Int k;\n\n    video->QP_prev = 31;\n\n    if (video->end_of_buf) /* left-over from previous run */\n    {\n        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n        if (status != PV_END_OF_BUF)\n        {\n            BitstreamEncReset(bs1);\n            video->end_of_buf = 0;\n        }\n        return status;\n    }\n\n    if (mbnum == 0) /* only do this at the start of a frame */\n    {\n        QPMB[0] = video->QP_prev = QP = currVop->quantizer;\n        video->usePrevQP = 0;\n\n        numHeaderBits = BitstreamGetPos(bs1); /* Number of bits in VOP Header */\n\n    }\n\n\n    /* Re-assign fast functions on every slice, don't have to put it in the memory */\n    QP = QPMB[mbnum];\n    if (mbnum > 0)   video->QP_prev = QPMB[mbnum-1];\n\n    /* determine type of quantization   */\n#ifndef NO_MPEG_QUANT\n    if (currVol->quantType == 0)\n        CodeMB = &CodeMB_H263;\n    else\n        CodeMB = &CodeMB_MPEG;\n#else\n    CodeMB = &CodeMB_H263;\n#endif\n\n    /* determine which functions to be used, in MB-level */\n    if (currVop->predictionType == P_VOP)\n        MBVlcEncode = &MBVlcEncodeDataPar_P_VOP;\n    else if (currVop->predictionType == I_VOP)\n        MBVlcEncode = &MBVlcEncodeDataPar_I_VOP;\n    else /* B_VOP not implemented yet */\n        return PV_FAIL;\n\n    /* determine which VLC table 
to be used */\n#ifndef NO_RVLC\n    if (currVol->useReverseVLC)\n        BlockCodeCoeff = &BlockCodeCoeff_RVLC;\n    else\n#endif\n        BlockCodeCoeff = &BlockCodeCoeff_Normal;\n\n    if (mbnum != 0)\n    {\n        goto JUMP_IN;\n    }\n\n    for (ind_y = 0; ind_y < currVol->nMBPerCol; ind_y++)  /* Col MB Loop */\n    {\n\n        video->outputMB->mb_y = ind_y; /*  5/28/01 */\n\n        for (ind_x = 0; ind_x < currVol->nMBPerRow; ind_x++)  /* Row MB Loop */\n        {\n\n            video->outputMB->mb_x = ind_x; /*  5/28/01 */\n            video->mbnum = mbnum;\n            video->sliceNo[mbnum] = slice_counter;      /* Update MB slice number */\n\n            /****************************************************************************************/\n            /* MB Prediction:Put into MC macroblock, substract from currVop, put in predMB */\n            /****************************************************************************************/\n            getMotionCompensatedMB(video, ind_x, ind_y, offset);\n\nJUMP_IN:\n\n            QP = QPMB[mbnum];   /* always read new QP */\n\n            if (start_packet_header)\n            {\n                slice_counter++;                        /* Increment slice counter */\n                video->sliceNo[mbnum] = slice_counter;  /* Update MB slice number*/\n                video->QP_prev = currVop->quantizer;                        /* store QP */\n                num_bits = BitstreamGetPos(bs1);\n                status = EncodeVideoPacketHeader(video, mbnum, video->QP_prev, 0);\n                numHeaderBits = BitstreamGetPos(bs1) - num_bits;\n                video->header_bits += numHeaderBits; /* Header Bits */\n                start_packet_header = 0;\n                video->usePrevQP = 0;\n            }\n            else  /* don't encode the first MB in packet again */\n            {\n                /***********************************************/\n                /* Code_MB:  DCT, Q, Q^(-1), IDCT, Motion 
Comp */\n                /***********************************************/\n\n                status = (*CodeMB)(video, &fastDCTfunction, (offset << 5) + QP, ncoefblck);\n                for (k = 0; k < 6; k++)\n                {\n                    M4VENC_MEMCPY(outputMB[k], video->outputMB->block[k], sizeof(Short) << 6);\n                }\n            }\n\n            /************************************/\n            /* MB VLC Encode: VLC Encode MB     */\n            /************************************/\n\n            /* save the state before VLC encoding */\n            bitCount1 = BitstreamGetPos(bs1);\n            bitCount2 = BitstreamGetPos(bs2);\n            bitCount3 = BitstreamGetPos(bs3);\n            byteCount1 = bitCount1 >> 3;\n            byteCount2 = bitCount2 >> 3;\n            byteCount3 = bitCount3 >> 3;\n            bitCount1 &= 0x7;\n            bitCount2 &= 0x7;\n            bitCount3 &= 0x7;\n            mode = Mode[mbnum];\n            CBP = video->headerInfo.CBP[mbnum];\n\n            /*************************************/\n\n            MBVlcEncode(video, ncoefblck, (void*)BlockCodeCoeff);\n\n            /*************************************************************/\n            /* Assemble Packets:  Assemble the MB VLC codes into Packets */\n            /*************************************************************/\n\n            num_bits = BitstreamGetPos(bs1) + BitstreamGetPos(bs2) +\n                       BitstreamGetPos(bs3);// - numHeaderBits; //include header bits\n\n            /* Assemble_Packet(video) */\n            if (num_bits > packet_size && mbnum != firstMB)  /* encoding at least one more MB*/\n            {\n\n                BitstreamRepos(bs1, byteCount1, bitCount1); /* rewind one MB */\n                BitstreamRepos(bs2, byteCount2, bitCount2); /* rewind one MB */\n                BitstreamRepos(bs3, byteCount3, bitCount3); /* rewind one MB */\n\n                if (video->currVop->predictionType == I_VOP)\n  
              {\n                    BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */\n                    video->header_bits += 19;\n                }\n                else\n                {\n                    BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/\n                    video->header_bits += 17;\n                }\n\n                status = BitstreamAppendEnc(bs1, bs2);  /* Combine with bs2 */\n                status = BitstreamAppendEnc(bs1, bs3);  /* Combine with bs3 */\n\n                video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n                status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n\n                BitstreamEncReset(bs2);\n                BitstreamEncReset(bs3);\n\n                if (status == PV_END_OF_BUF) /* if cannot fit a buffer */\n                {\n                    video->end_of_buf = 1;\n                }\n                else\n                {\n                    BitstreamEncReset(bs1);\n                }\n\n                start_packet_header = 1;\n\n                if (mbnum < nTotalMB || video->end_of_buf) /* return here */\n                {\n                    video->mbnum = mbnum;\n                    video->sliceNo[mbnum] = slice_counter;\n                    video->offset = offset;\n                    Mode[mbnum] = mode;\n                    video->headerInfo.CBP[mbnum] = CBP;\n\n                    for (k = 0; k < 6; k++)\n                    {\n                        M4VENC_MEMCPY(video->outputMB->block[k], outputMB[k], sizeof(Short) << 6);\n                    }\n\n                    return status;\n                }\n            }\n\n            offset += 16;\n            mbnum++; /* has to increment before SCD, to preserve Mode[mbnum] */\n        } /* End of For ind_x */\n\n        offset += (lx << 4) - width;\n\n    } /* End of For ind_y */\n\n    if (!start_packet_header)\n    {\n        if 
(video->currVop->predictionType == I_VOP)\n        {\n            BitstreamPutGT16Bits(bs1, 19, DC_MARKER);   /* Add dc_marker */\n            video->header_bits += 19;\n        }\n        else\n        {\n            BitstreamPutGT16Bits(bs1, 17, MOTION_MARKER_COMB); /*Add motion_marker*/\n            video->header_bits += 17;\n        }\n\n        status = BitstreamAppendEnc(bs1, bs2);  /* Combine with bs2 */\n        status = BitstreamAppendEnc(bs1, bs3);  /* Combine with bs3 */\n\n        video->header_bits += BitstreamMpeg4ByteAlignStuffing(bs1); /* Byte align Packet */\n        status = BitstreamAppendPacketNoOffset(currVol->stream, bs1);\n\n        BitstreamEncReset(bs2);\n        BitstreamEncReset(bs3);\n\n        if (status == PV_END_OF_BUF)\n        {\n            video->end_of_buf = 1;\n        }\n        else\n        {\n            BitstreamEncReset(bs1);\n        }\n    }\n\n    video->mbnum = mbnum;\n    if (mbnum < nTotalMB)\n        video->sliceNo[mbnum] = slice_counter;\n    video->offset = offset;\n\n    return status;\n}\n#endif /* NO_SLICE_ENCODE */\n#endif /* H263_ONLY */\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/dct.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_base_macros.h\" // for OSCL_UNUSED_ARG\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"dct_inline.h\"\n\n#define FDCT_SHIFT 10\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    /**************************************************************************/\n    /*  Function:   BlockDCT_AANwSub\n        Date:       7/31/01\n        Input:\n        Output:     out[64] ==> next block\n        Purpose:    Do subtraction for zero MV first\n        Modified:\n    **************************************************************************/\n\n    Void BlockDCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)\n    {\n        Short *dst;\n        Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x0188053A;\n        Int abs_sum;\n        Int mask;\n        Int tmp, tmp2;\n        Int ColTh;\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            /* assuming the block is word-aligned */\n            mask = 0x1FE;\n            tmp = *((Int*) cur);    /* contains 4 pixels */\n            tmp2 = 
*((Int*) pred); /* prediction 4 pixels */\n            k0 = tmp2 & 0xFF;\n            k1 = mask & (tmp << 1);\n            k0 = k1 - (k0 << 1);\n            k1 = (tmp2 >> 8) & 0xFF;\n            k2 = mask & (tmp >> 7);\n            k1 = k2 - (k1 << 1);\n            k2 = (tmp2 >> 16) & 0xFF;\n            k3 = mask & (tmp >> 15);\n            k2 = k3 - (k2 << 1);\n            k3 = (tmp2 >> 24) & 0xFF;\n            k4 = mask & (tmp >> 23);\n            k3 = k4 - (k3 << 1);\n            tmp = *((Int*)(cur + 4));   /* another 4 pixels */\n            tmp2 = *((Int*)(pred + 4));\n            k4 = tmp2 & 0xFF;\n            k5 = mask & (tmp << 1);\n            k4 = k5 - (k4 << 1);\n            k5 = (tmp2 >> 8) & 0xFF;\n            k6 = mask & (tmp >> 7);\n            k5 = k6 - (k5 << 1);\n            k6 = (tmp2 >> 16) & 0xFF;\n            k7 = mask & (tmp >> 15);\n            k6 = k7 - (k6 << 1);\n            k7 = (tmp2 >> 24) & 0xFF;\n            tmp = mask & (tmp >> 23);\n            k7 = tmp - (k7 << 1);\n            cur += width;\n            pred += 16;\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            k1 = k0 - (k1 << 1);\n            /**********/\n            dst[0] = k0;\n            dst[4] = k1; /* col. 
4 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            k3 = (k3 << 1) - k2;\n            /********/\n            dst[2] = k2;        /* col. 2 */\n            k3 <<= 1;       /* scale up col. 6 */\n            dst[6] = k3; /* col. 6 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k4 = k4 + k7;\n            k7 = (k7 << 1) - k4;\n            k5 = k5 + k6;\n            k4 <<= 1;       /* scale up col.5 */\n            k6 = k5 - (k6 << 1);\n            /********/\n            dst[5] = k4;    /* col. 5 */\n            k6 <<= 2;       /* scale up col. 7 */\n            dst[1] = k5;    /* col. 1 */\n            dst[7] = k6;    /* col. 7 */\n            dst[3] = k7;    /* col. 
3 */\n            dst += 8;\n        }\n        while (dst < out);\n\n        out -= 64;\n        dst = out + 8;\n\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n            /* deadzone thresholding for column */\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            k1 = k0 - (k1 << 1);\n            /**********/\n            out[32] = k1; /* row 4 */\n            out[0] = k0; /* row 0 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            k3 = (k3 << 1) - k2;\n            k3 <<= 1;       /* scale up col. 
6 */\n            /********/\n            out[48] = k3;   /* row 6 */\n            out[16] = k2;   /* row 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k4 = k4 + k7;\n            k7 = (k7 << 1) - k4;\n            k5 = k5 + k6;\n            k4 <<= 1;       /* scale up col. 5 */\n            k6 = k5 - (k6 << 1);\n            /********/\n            out[24] = k7 ;    /* row 3 */\n            k6 <<= 2;       /* scale up col. 7 */\n            out[56] = k6 ;   /* row 7 */\n            out[8] = k5 ;    /* row 1 */\n            out[40] = k4 ;   /* row 5 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   Block4x4DCT_AANwSub\n        Date:       7/31/01\n        Input:\n        Output:     out[64] ==> next block\n        Purpose:    Do subtraction for zero MV first before 4x4 DCT\n        Modified:\n    **************************************************************************/\n\n    Void Block4x4DCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)\n    {\n        Short *dst;\n        register Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x0188053A;\n        Int mask;\n        Int tmp, tmp2;\n        Int abs_sum;\n        Int ColTh;\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            /* assuming 
the block is word-aligned */\n            mask = 0x1FE;\n            tmp = *((Int*) cur);    /* contains 4 pixels */\n            tmp2 = *((Int*) pred); /* prediction 4 pixels */\n            k0 = tmp2 & 0xFF;\n            k1 = mask & (tmp << 1);\n            k0 = k1 - (k0 << 1);\n            k1 = (tmp2 >> 8) & 0xFF;\n            k2 = mask & (tmp >> 7);\n            k1 = k2 - (k1 << 1);\n            k2 = (tmp2 >> 16) & 0xFF;\n            k3 = mask & (tmp >> 15);\n            k2 = k3 - (k2 << 1);\n            k3 = (tmp2 >> 24) & 0xFF;\n            k4 = mask & (tmp >> 23);\n            k3 = k4 - (k3 << 1);\n            tmp = *((Int*)(cur + 4));   /* another 4 pixels */\n            tmp2 = *((Int*)(pred + 4));\n            k4 = tmp2 & 0xFF;\n            k5 = mask & (tmp << 1);\n            k4 = k5 - (k4 << 1);\n            k5 = (tmp2 >> 8) & 0xFF;\n            k6 = mask & (tmp >> 7);\n            k5 = k6 - (k5 << 1);\n            k6 = (tmp2 >> 16) & 0xFF;\n            k7 = mask & (tmp >> 15);\n            k6 = k7 - (k6 << 1);\n            k7 = (tmp2 >> 24) & 0xFF;\n            tmp = mask & (tmp >> 23);\n            k7 = tmp - (k7 << 1);\n            cur += width;\n            pred += 16;\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            dst[0] = k0;\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 
= mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            /********/\n            dst[2] = k2;        /* col. 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k7 = k7 - k4;\n            k5 = k5 + k6;\n            /********/\n            dst[1] = k5;        /* col. 1 */\n            dst[3] = k7;        /* col. 3 */\n            dst += 8;\n        }\n        while (dst < out);\n\n        out -= 64;\n        dst = out + 4;\n\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            out[0] = k0;   /* row 0 */\n   
         /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            /********/\n            out[16] = k2;           /* row 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k7 = k7 - k4 ;\n            k5 = k5 + k6;\n            /********/\n            out[24] = k7 ;      /* row 3 */\n            out[8] = k5 ;       /* row 1 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   Block2x2DCT_AANwSub\n        Date:       7/31/01\n        Input:\n        Output:     out[64] ==> next block\n        Purpose:    Do subtraction for zero MV first before 2x2 DCT\n        Modified:\n    **************************************************************************/\n\n\n    Void Block2x2DCT_AANwSub(Short *out, UChar *cur, UChar *pred, Int width)\n    {\n        Short *dst;\n        register Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x018803B2;\n        Int mask;\n        Int tmp, tmp2;\n        Int abs_sum;\n        
Int ColTh;\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            /* assuming the block is word-aligned */\n            mask = 0x1FE;\n            tmp = *((Int*) cur);    /* contains 4 pixels */\n            tmp2 = *((Int*) pred); /* prediction 4 pixels */\n            k0 = tmp2 & 0xFF;\n            k1 = mask & (tmp << 1);\n            k0 = k1 - (k0 << 1);\n            k1 = (tmp2 >> 8) & 0xFF;\n            k2 = mask & (tmp >> 7);\n            k1 = k2 - (k1 << 1);\n            k2 = (tmp2 >> 16) & 0xFF;\n            k3 = mask & (tmp >> 15);\n            k2 = k3 - (k2 << 1);\n            k3 = (tmp2 >> 24) & 0xFF;\n            k4 = mask & (tmp >> 23);\n            k3 = k4 - (k3 << 1);\n            tmp = *((Int*)(cur + 4));   /* another 4 pixels */\n            tmp2 = *((Int*)(pred + 4));\n            k4 = tmp2 & 0xFF;\n            k5 = mask & (tmp << 1);\n            k4 = k5 - (k4 << 1);\n            k5 = (tmp2 >> 8) & 0xFF;\n            k6 = mask & (tmp >> 7);\n            k5 = k6 - (k5 << 1);\n            k6 = (tmp2 >> 16) & 0xFF;\n            k7 = mask & (tmp >> 15);\n            k6 = k7 - (k6 << 1);\n            k7 = (tmp2 >> 24) & 0xFF;\n            tmp = mask & (tmp >> 23);\n            k7 = tmp - (k7 << 1);\n            cur += width;\n            pred += 16;\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            dst[0] = k0;\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            /* 
MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            /*****************/\n            /********/\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k1 = mla392(k4, k14, round);\n            k1 = mla946(k6, k14, k1);\n\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k5 = k5 + k6;\n            /********/\n            dst[1] = k5;\n            dst += 8;\n        }\n        while (dst < out);\n        out -= 64;\n        dst = out + 2;\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            out[0] = k0;        /* row 0 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume 
FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            /*****************/\n            /********/\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k1 = mla392(k4, k14, round);\n            k1 = mla946(k6, k14, k1);\n\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k5 = k5 + k6;\n            /********/\n            out[8] = k5 ;       /* row 1 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   BlockDCT_AANIntra\n        Date:       8/9/01\n        Input:      rec\n        Output:     out[64] ==> next block\n        Purpose:    Input directly from rec frame.\n        Modified:\n    **************************************************************************/\n\n    Void BlockDCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width)\n    {\n        Short *dst;\n        Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x0188053A;\n        Int abs_sum;\n        Int mask;\n        Int *curInt, tmp;\n        Int ColTh;\n\n        OSCL_UNUSED_ARG(dummy2);\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            mask = 0x1FE;\n            curInt = (Int*) cur;\n            tmp = curInt[0];    /* contains 4 pixels */\n            k0 = mask & (tmp << 1);\n            k1 = mask & (tmp >> 7);\n            k2 = mask & (tmp >> 15);\n            k3 = mask & (tmp >> 23);\n            tmp = curInt[1];    /* another 4 pixels */\n            k4 =  mask & (tmp << 1);\n            k5 =  mask & (tmp >> 7);\n            k6 =  
mask & (tmp >> 15);\n            k7 =  mask & (tmp >> 23);\n            cur += width;\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            k1 = k0 - (k1 << 1);\n            /**********/\n            dst[0] = k0;\n            dst[4] = k1; /* col. 4 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            k3 = (k3 << 1) - k2;\n            /********/\n            dst[2] = k2;        /* col. 2 */\n            k3 <<= 1;       /* scale up col. 6 */\n            dst[6] = k3; /* col. 
6 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k4 = k4 + k7;\n            k7 = (k7 << 1) - k4;\n            k5 = k5 + k6;\n            k4 <<= 1;       /* scale up col.5 */\n            k6 = k5 - (k6 << 1);\n            /********/\n            dst[5] = k4;    /* col. 5 */\n            k6 <<= 2;       /* scale up col. 7 */\n            dst[1] = k5;    /* col. 1 */\n            dst[7] = k6;    /* col. 7 */\n            dst[3] = k7;    /* col. 3 */\n            dst += 8;\n        }\n        while (dst < out);\n\n        out -= 64;\n        dst = out + 8;\n\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n            /* deadzone thresholding for column */\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + 
k1;\n            k1 = k0 - (k1 << 1);\n            /**********/\n            out[32] = k1; /* row 4 */\n            out[0] = k0; /* row 0 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            k3 = (k3 << 1) - k2;\n            k3 <<= 1;       /* scale up col. 6 */\n            /********/\n            out[48] = k3;   /* row 6 */\n            out[16] = k2;   /* row 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k4 = k4 + k7;\n            k7 = (k7 << 1) - k4;\n            k5 = k5 + k6;\n            k4 <<= 1;       /* scale up col. 5 */\n            k6 = k5 - (k6 << 1);\n            /********/\n            out[24] = k7 ;    /* row 3 */\n            k6 <<= 2;       /* scale up col. 
7 */\n            out[56] = k6 ;   /* row 7 */\n            out[8] = k5 ;    /* row 1 */\n            out[40] = k4 ;   /* row 5 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   Block4x4DCT_AANIntra\n        Date:       8/9/01\n        Input:      prev\n        Output:     out[64] ==> next block\n        Purpose:    Input directly from prev frame. output 2x2 DCT\n        Modified:\n    **************************************************************************/\n\n    Void Block4x4DCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width)\n    {\n        Short *dst;\n        register Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x0188053A;\n        Int mask;\n        Int *curInt, tmp;\n        Int abs_sum;\n        Int ColTh;\n\n        OSCL_UNUSED_ARG(dummy2);\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            mask = 0x1FE;\n            curInt = (Int*) cur;\n            tmp = curInt[0];    /* contains 4 pixels */\n            k0 = mask & (tmp << 1);\n            k1 = mask & (tmp >> 7);\n            k2 = mask & (tmp >> 15);\n            k3 = mask & (tmp >> 23);\n            tmp = curInt[1];    /* another 4 pixels */\n            k4 =  mask & (tmp << 1);\n            k5 =  mask & (tmp >> 7);\n            k6 =  mask & (tmp >> 15);\n            k7 =  mask & (tmp >> 23);\n            cur += width;\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n           
 k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            dst[0] = k0;\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            /********/\n            dst[2] = k2;        /* col. 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k7 = k7 - k4;\n            k5 = k5 + k6;\n            /********/\n            dst[1] = k5;        /* col. 1 */\n            dst[3] = k7;        /* col. 
3 */\n            dst += 8;\n        }\n        while (dst < out);\n\n        out -= 64;\n        dst = out + 4;\n\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            out[0] = k0;   /* row 0 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            k2 = k2 + k3;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n            k0 = mla724(k12, k2, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            k2 = k0 >> FDCT_SHIFT;\n            /*****************/\n            k2 = k2 + k3;\n            /********/\n            out[16] = k2;           /* row 2 */\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k0 = k4 - k6;\n\n            k1 = mla392(k0, k14, round);\n            k0 = mla554(k4, k12, k1);\n            k1 = mla1338(k6, k14, k1);\n\n            k4 = 
k0 >> FDCT_SHIFT;\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k7 = (k7 << 1) - k5;\n            k7 = k7 - k4 ;\n            k5 = k5 + k6;\n            /********/\n            out[24] = k7 ;      /* row 3 */\n            out[8] = k5 ;       /* row 1 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   Block2x2DCT_AANIntra\n        Date:       8/9/01\n        Input:      prev\n        Output:     out[64] ==> next block\n        Purpose:    Input directly from prev frame. output 2x2 DCT\n        Modified:\n    **************************************************************************/\n\n    Void Block2x2DCT_AANIntra(Short *out, UChar *cur, UChar *dummy2, Int width)\n    {\n        Short *dst;\n        register Int k0, k1, k2, k3, k4, k5, k6, k7;\n        Int round;\n        Int k12 = 0x022A02D4;\n        Int k14 = 0x018803B2;\n        Int mask;\n        Int *curInt, tmp;\n        Int abs_sum;\n        Int ColTh;\n\n        OSCL_UNUSED_ARG(dummy2);\n\n        dst = out + 64 ;\n        ColTh = *dst;\n        out += 128;\n        round = 1 << (FDCT_SHIFT - 1);\n\n        do  /* fdct_nextrow */\n        {\n            mask = 0x1FE;\n            curInt = (Int*) cur;\n            tmp = curInt[0];    /* contains 4 pixels */\n            k0 = mask & (tmp << 1);\n            k1 = mask & (tmp >> 7);\n            k2 = mask & (tmp >> 15);\n            k3 = mask & (tmp >> 23);\n            tmp = curInt[1];    /* another 4 pixels */\n            k4 =  mask & (tmp << 1);\n            k5 =  mask & (tmp >> 7);\n            k6 =  mask & (tmp >> 15);\n            k7 =  mask & (tmp >> 23);\n            cur += width;\n\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n  
          k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 << 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            dst[0] = k0;\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            /*****************/\n            /********/\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k1 = mla392(k4, k14, round);\n            k1 = mla946(k6, k14, k1);\n\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k5 = k5 + k6;\n            /********/\n            dst[1] = k5;\n            dst += 8;\n        }\n        while (dst < out);\n        out -= 64;\n        dst = out + 2;\n        /*  Vertical Block Loop  */\n        do  /* Vertical 8xDCT loop */\n        {\n            k0 = out[0];\n            k1 = out[8];\n            k2 = out[16];\n            k3 = out[24];\n            k4 = out[32];\n            k5 = out[40];\n            k6 = out[48];\n            k7 = out[56];\n\n            abs_sum = sum_abs(k0, k1, k2, k3, k4, k5, k6, k7);\n\n            if (abs_sum < ColTh)\n            {\n                out[0] = 0x7fff;\n                out++;\n                continue;\n            }\n            /* fdct_1 */\n            k0 = k0 + k7;\n            k7 = k0 - (k7 << 1);\n            k1 = k1 + k6;\n            k6 = k1 - (k6 << 1);\n            k2 = k2 + k5;\n            k5 = k2 - (k5 << 1);\n            k3 = k3 + k4;\n            k4 = k3 - (k4 
<< 1);\n\n            k0 = k0 + k3;\n            k3 = k0 - (k3 << 1);\n            k1 = k1 + k2;\n            k2 = k1 - (k2 << 1);\n\n            k0 = k0 + k1;\n            /**********/\n            out[0] = k0;        /* row 0 */\n            /* fdct_2 */\n            k4 = k4 + k5;\n            k5 = k5 + k6;\n            k6 = k6 + k7;\n            /* MUL2C k2,k5,724,FDCT_SHIFT */\n            /* k0, k1 become scratch */\n            /* assume FAST MULTIPLY */\n            k1 = mla724(k12, k5, round);\n\n            k5 = k1 >> FDCT_SHIFT;\n            /*****************/\n            /********/\n            /* fdct_3 */\n            /* ROTATE k4,k6,392,946, FDCT_SHIFT */\n            /* assume FAST MULTIPLY */\n            /* k0, k1 are output */\n            k1 = mla392(k4, k14, round);\n            k1 = mla946(k6, k14, k1);\n\n            k6 = k1 >> FDCT_SHIFT;\n            /***********************/\n            k5 = k5 + k7;\n            k5 = k5 + k6;\n            /********/\n            out[8] = k5 ;       /* row 1 */\n            out++;\n        }\n        while ((UInt)out < (UInt)dst) ;\n\n        return ;\n    }\n    /**************************************************************************/\n    /*  Function:   Block1x1DCTwSub\n        Date:       8/9/01\n        Input:      block\n        Output:     y\n        Purpose:    Compute DC value only\n        Modified:\n    **************************************************************************/\n    void Block1x1DCTwSub(Short *out, UChar *cur, UChar *pred, Int width)\n    {\n        UChar *end;\n        Int temp = 0;\n        Int offset2;\n\n        offset2 = width - 8;\n        end = pred + (16 << 3);\n        do\n        {\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - *pred++);\n            temp += (*cur++ - 
*pred++);\n            temp += (*cur++ - *pred++);\n            cur += offset2;\n            pred += 8;\n        }\n        while (pred < end) ;\n\n        out[1] = out[2] = out[3] = out[4] = out[5] = out[6] = out[7] = 0;\n        out[0] = temp >> 3;\n\n        return ;\n    }\n\n    /**************************************************************************/\n    /*  Function:   Block1x1DCTIntra\n        Date:       8/9/01\n        Input:      prev\n        Output:     out\n        Purpose:    Compute DC value only\n        Modified:\n    **************************************************************************/\n    void Block1x1DCTIntra(Short *out, UChar *cur, UChar *dummy2, Int width)\n    {\n        UChar *end;\n        Int temp = 0;\n        ULong word;\n\n        OSCL_UNUSED_ARG(dummy2);\n\n        end = cur + (width << 3);\n        do\n        {\n            word = *((ULong*)cur);\n            temp += (word >> 24);\n            temp += ((word >> 16) & 0xFF);\n            temp += ((word >> 8) & 0xFF);\n            temp += (word & 0xFF);\n\n            word = *((ULong*)(cur + 4));\n            temp += (word >> 24);\n            temp += ((word >> 16) & 0xFF);\n            temp += ((word >> 8) & 0xFF);\n            temp += (word & 0xFF);\n\n            cur += width;\n        }\n        while (cur < end) ;\n\n        out[1] = out[2] = out[3] = out[4] = out[5] = out[6] = out[7] = 0;\n        out[0] = temp >> 3;\n\n        return ;\n    }\n\n#ifdef __cplusplus\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/dct.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _DCT_H_\n#define _DCT_H_\n\nconst static Int ColThInter[32] = {0, 0x1C, 0x4C, 0x6C, 0x9C, 0xBC, 0xEC, 0x10C,\n                                   0x13C, 0x15C, 0x18C, 0x1AC, 0x1DC, 0x1FC, 0x22C, 0x24C,\n                                   0x27C, 0x29C, 0x2CC, 0x2EC, 0x31C, 0x33C, 0x36C, 0x38C,\n                                   0x3BC, 0x3DC, 0x40C, 0x42C, 0x45C, 0x47C, 0x4AC, 0x4CC\n                                  };\n\nconst static Int ColThIntra[32] = {0, 0x1C, 0x3C, 0x5C, 0x7C, 0x9C, 0xBC, 0xDC,\n                                   0xFC, 0x11C, 0x13C, 0x15C, 0x17C, 0x19C, 0x1BC, 0x1DC,\n                                   0x1FC, 0x21C, 0x23C, 0x25C, 0x27C, 0x29C, 0x2BC, 0x2DC,\n                                   0x2FC, 0x31C, 0x33C, 0x35C, 0x37C, 0x39C, 0x3BC, 0x3DC\n                                  };\n\n/******************************************************/\n/********** IDCT part **************************/\nconst static unsigned char imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};\nconst static unsigned char mask[8] = {0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0xfe};\n\n#define W1 2841                 /* 2048*sqrt(2)*cos(1*pi/16) */\n#define W2 2676                 /* 
2048*sqrt(2)*cos(2*pi/16) */\n#define W3 2408                 /* 2048*sqrt(2)*cos(3*pi/16) */\n#define W5 1609                 /* 2048*sqrt(2)*cos(5*pi/16) */\n#define W6 1108                 /* 2048*sqrt(2)*cos(6*pi/16) */\n#define W7 565                  /* 2048*sqrt(2)*cos(7*pi/16) */\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    /* Reduced input IDCT */\n    void idct_col0(Short *blk);\n    void idct_col1(Short *blk);\n    void idct_col2(Short *blk);\n    void idct_col3(Short *blk);\n    void idct_col4(Short *blk);\n    void idct_col0x40(Short *blk);\n    void idct_col0x20(Short *blk);\n    void idct_col0x10(Short *blk);\n\n    void idct_rowInter(Short *srce, UChar *rec, Int lx);\n    void idct_row0Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row1Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row2Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row3Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row4Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row0x40Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row0x20Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row0x10Inter(Short *blk, UChar *rec, Int lx);\n    void idct_row0xCCInter(Short *blk, UChar *rec, Int lx);\n    void idct_rowIntra(Short *srce, UChar *rec, Int lx);\n    void idct_row0Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row1Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row2Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row3Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row4Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row0x40Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row0x20Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row0x10Intra(Short *blk, UChar *rec, Int lx);\n    void idct_row0xCCIntra(Short *blk, UChar *rec, Int lx);\n    void idct_rowzmv(Short *srce, UChar *rec, UChar *prev, Int lx);\n    void idct_row0zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row1zmv(Short *blk, 
UChar *rec, UChar *prev, Int lx);\n    void idct_row2zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row3zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row4zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row0x40zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row0x20zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row0x10zmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n    void idct_row0xCCzmv(Short *blk, UChar *rec, UChar *prev, Int lx);\n\n\n#ifdef __cplusplus\n}\n#endif\n\n/* Look-up table mapping to RIDCT from bitmap */\n#ifdef SMALL_DCT\n\nstatic void (*const idctcolVCA[16])(Short*) =\n{\n    &idct_col0, &idct_col4, &idct_col3, &idct_col4,\n    &idct_col2, &idct_col4, &idct_col3, &idct_col4,\n    &idct_col1, &idct_col4, &idct_col3, &idct_col4,\n    &idct_col2, &idct_col4, &idct_col3, &idct_col4\n};\n\nstatic void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =\n{\n    &idct_row0Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,\n    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,\n    &idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,\n    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter\n};\n\nstatic void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =\n{\n    &idct_row0zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,\n    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,\n    &idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,\n    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv\n};\n\n\nstatic void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =\n{\n    &idct_row0Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,\n    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,\n    &idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,\n    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra\n};\n\n#else /* SMALL_DCT 
*/\n\nstatic void (*const idctcolVCA[16])(Short*) =\n{\n    &idct_col0, &idct_col0x10, &idct_col0x20, &idct_col4,\n    &idct_col0x40, &idct_col4, &idct_col3, &idct_col4,\n    &idct_col1, &idct_col4, &idct_col3, &idct_col4,\n    &idct_col2, &idct_col4, &idct_col3, &idct_col4\n};\n\nstatic void (*const idctrowVCAInter[16])(Short*, UChar*, Int) =\n{\n    &idct_row0Inter, &idct_row0x10Inter, &idct_row0x20Inter, &idct_row4Inter,\n    &idct_row0x40Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,\n    &idct_row1Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter,\n    &idct_row2Inter, &idct_row4Inter, &idct_row3Inter, &idct_row4Inter\n};\n\nstatic void (*const idctrowVCAzmv[16])(Short*, UChar*, UChar*, Int) =\n{\n    &idct_row0zmv, &idct_row0x10zmv, &idct_row0x20zmv, &idct_row4zmv,\n    &idct_row0x40zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,\n    &idct_row1zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv,\n    &idct_row2zmv, &idct_row4zmv, &idct_row3zmv, &idct_row4zmv\n};\n\nstatic void (*const idctrowVCAIntra[16])(Short*, UChar*, Int) =\n{\n    &idct_row0Intra, &idct_row0x10Intra, &idct_row0x20Intra, &idct_row4Intra,\n    &idct_row0x40Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,\n    &idct_row1Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra,\n    &idct_row2Intra, &idct_row4Intra, &idct_row3Intra, &idct_row4Intra\n};\n\n#endif /* SMALL_DCT */\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /* part in AppVCA_dct.c */\n//void Block1x1DCTzmv (Short *out,UChar *prev,UChar *cur,UChar *rec,Int lx,Int chroma);\n    void Block1x1DCTwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);\n    void Block1x1DCTIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);\n    /* This part is in dct_aan.c */\n    Void BlockDCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);\n    Void Block4x4DCT_AANwSub(Short *out, UChar *cur, UChar *prev, Int pitch_chroma);\n    Void Block2x2DCT_AANwSub(Short *out, UChar *cur, 
UChar *prev, Int pitch_chroma);\n//Void BlockDCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);\n//Void Block4x4DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);\n//Void Block2x2DCT_AANzmv(Short *out,UChar *prev,UChar *cur,UChar *rec,Int ColTh,Int lx,Int chroma);\n    Void BlockDCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);\n    Void Block4x4DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);\n    Void Block2x2DCT_AANIntra(Short *out, UChar *cur, UChar *dummy1, Int pitch_chroma);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif //_DCT_H_\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/dct_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*  Filename: dct_inline.h                                                      */\n/*  Description: Implementation for in-line functions used in dct.cpp           */\n/*  Modified:                                                                   */\n/*********************************************************************************/\n#ifndef _DCT_INLINE_H_\n#define _DCT_INLINE_H_\n\n#if !defined(PV_ARM_GCC_V5)\n\n#include \"oscl_base_macros.h\"\n\n__inline int32 mla724(int32 op1, int32 op2, int32 op3)\n{\n    int32 out;\n\n    OSCL_UNUSED_ARG(op1);\n\n    out = op2 * 724 + op3; /* op1 is not used here */\n\n    return out;\n}\n\n__inline int32 mla392(int32 k0, int32 k14, int32 round)\n{\n    int32 k1;\n\n    OSCL_UNUSED_ARG(k14);\n\n    k1 = k0 * 392 + round;\n\n    return k1;\n}\n\n__inline int32 mla554(int32 k4, int32 k12, int32 k1)\n{\n    int32 k0;\n\n    OSCL_UNUSED_ARG(k12);\n\n    k0 = k4 * 554 + k1;\n\n    return k0;\n}\n\n__inline int32 mla1338(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    OSCL_UNUSED_ARG(k14);\n\n    out = k6 * 1338 + k1;\n\n    return out;\n}\n\n__inline int32 mla946(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    OSCL_UNUSED_ARG(k14);\n\n    out = 
k6 * 946 + k1;\n\n    return out;\n}\n\n__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,\n                       int32 k4, int32 k5, int32 k6, int32 k7)\n{\n    int32 carry, abs_sum;\n\n    carry = k0 >> 31;\n    abs_sum = (k0 ^ carry);\n    carry = k1 >> 31;\n    abs_sum += (k1 ^ carry) - carry;\n    carry = k2 >> 31;\n    abs_sum += (k2 ^ carry) - carry;\n    carry = k3 >> 31;\n    abs_sum += (k3 ^ carry) - carry;\n    carry = k4 >> 31;\n    abs_sum += (k4 ^ carry) - carry;\n    carry = k5 >> 31;\n    abs_sum += (k5 ^ carry) - carry;\n    carry = k6 >> 31;\n    abs_sum += (k6 ^ carry) - carry;\n    carry = k7 >> 31;\n    abs_sum += (k7 ^ carry) - carry;\n\n    return abs_sum;\n}\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n#if defined(__TARGET_ARCH_5TE)\n\n__inline int32 mla724(int32 op1, int32 op2, int32 op3)\n{\n    int32 out;\n\n    __asm\n    {\n        smlabb out, op1, op2, op3\n    }\n\n    return out;\n}\n\n__inline int32 mla392(int32 k0, int32 k14, int32 round)\n{\n    int32 k1;\n\n    __asm\n    {\n        smlabt k1, k0, k14, round\n    }\n\n    return k1;\n}\n\n__inline int32 mla554(int32 k4, int32 k12, int32 k1)\n{\n    int32 k0;\n\n    __asm\n    {\n        smlabt k0, k4, k12, k1\n    }\n\n    return k0;\n}\n\n__inline int32 mla1338(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    __asm\n    {\n        smlabb out, k6, k14, k1\n    }\n\n    return out;\n}\n\n__inline int32 mla946(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    __asm\n    {\n        smlabb out, k6, k14, k1\n    }\n\n    return out;\n}\n\n#else // not ARM5TE\n\n\n__inline int32 mla724(int32 op1, int32 op2, int32 op3)\n{\n    int32 out;\n\n    __asm\n    {\n        and out, op2, #0xFFFF\n        mla out, op1, out, op3\n    }\n\n    return out;\n}\n\n__inline int32 mla392(int32 k0, int32 k14, int32 round)\n{\n    int32 k1;\n\n    __asm\n    {\n        mov k1, k14, asr #16\n        mla k1, k0, k1, round\n    }\n\n    return k1;\n}\n\n__inline 
int32 mla554(int32 k4, int32 k12, int32 k1)\n{\n    int32 k0;\n\n    __asm\n    {\n        mov  k0, k12, asr #16\n        mla k0, k4, k0, k1\n    }\n\n    return k0;\n}\n\n__inline int32 mla1338(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    __asm\n    {\n        and out, k14, 0xFFFF\n        mla out, k6, out, k1\n    }\n\n    return out;\n}\n\n__inline int32 mla946(int32 k6, int32 k14, int32 k1)\n{\n    int32 out;\n\n    __asm\n    {\n        and out, k14, 0xFFFF\n        mla out, k6, out, k1\n    }\n\n    return out;\n}\n\n#endif\n\n__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,\n                       int32 k4, int32 k5, int32 k6, int32 k7)\n{\n    int32 carry, abs_sum;\n    __asm\n    {\n        eor     carry, k0, k0, asr #31 ;\n        eors    abs_sum, k1, k1, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k2, k2, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k3, k3, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k4, k4, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k5, k5, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k6, k6, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n        eors    carry,  k7, k7, asr #31 ;\n        adc     abs_sum, abs_sum, carry ;\n    }\n\n    return abs_sum;\n}\n\n#elif defined(PV_ARM_GCC_V5)  /* ARM GNU COMPILER  */\n\n__inline int32 mla724(int32 op1, int32 op2, int32 op3)\n{\n    register int32 out;\n    register int32 aa = (int32)op1;\n    register int32 bb = (int32)op2;\n    register int32 cc = (int32)op3;\n\n    asm volatile(\"smlabb %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n    return out;\n}\n\n\n__inline int32 mla392(int32 k0, int32 k14, int32 round)\n{\n    register int32 out;\n    register int32 aa = (int32)k0;\n    
register int32 bb = (int32)k14;\n    register int32 cc = (int32)round;\n\n    asm volatile(\"smlabt %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n\n    return out;\n}\n\n__inline int32 mla554(int32 k4, int32 k12, int32 k1)\n{\n    register int32 out;\n    register int32 aa = (int32)k4;\n    register int32 bb = (int32)k12;\n    register int32 cc = (int32)k1;\n\n    asm volatile(\"smlabt %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n\n    return out;\n}\n\n__inline int32 mla1338(int32 k6, int32 k14, int32 k1)\n{\n    register int32 out;\n    register int32 aa = (int32)k6;\n    register int32 bb = (int32)k14;\n    register int32 cc = (int32)k1;\n\n    asm volatile(\"smlabb %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n    return out;\n}\n\n__inline int32 mla946(int32 k6, int32 k14, int32 k1)\n{\n    register int32 out;\n    register int32 aa = (int32)k6;\n    register int32 bb = (int32)k14;\n    register int32 cc = (int32)k1;\n\n    asm volatile(\"smlabb %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n    return out;\n}\n\n__inline int32 sum_abs(int32 k0, int32 k1, int32 k2, int32 k3,\n                       int32 k4, int32 k5, int32 k6, int32 k7)\n{\n    register int32 carry;\n    register int32 abs_sum;\n    register int32 aa = (int32)k0;\n    register int32 bb = (int32)k1;\n    register int32 cc = (int32)k2;\n    register int32 dd = (int32)k3;\n    register int32 ee = (int32)k4;\n    register int32 ff = (int32)k5;\n    register int32 gg = (int32)k6;\n    register int32 hh = (int32)k7;\n\n    asm volatile(\"eor  %0, 
%2, %2, asr #31\\n\\t\"\n                 \"eors %1, %3, %3, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %4, %4, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %5, %5, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %6, %6, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %7, %7, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %8, %8, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n                 \"eors %0, %9, %9, asr #31\\n\\t\"\n                 \"adc  %1, %1, %0\\n\\t\"\n\n             : \"=&r\"(carry),\n                 \"=&r\"(abs_sum):\n                         \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc),\n                         \"r\"(dd),\n                         \"r\"(ee),\n                         \"r\"(ff),\n                         \"r\"(gg),\n                         \"r\"(hh));\n\n    return abs_sum;\n}\n\n#endif // Diff. OS\n\n#endif //_DCT_INLINE_H_\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/fastcodemb.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_base_macros.h\" // for OSCL_UNUSED_ARG\n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n#include \"mp4enc_lib.h\"\n#include \"dct.h\"\n#include \"m4venc_oscl.h\"\n\n/* ======================================================================== */\n/*  Function : CodeMB_H263( )                                               */\n/*  Date     : 8/15/2001                                                    */\n/*  Purpose  : Perform residue calc (only zero MV), DCT, H263 Quant/Dequant,*/\n/*              IDCT and motion compensation.Modified from FastCodeMB()     */\n/*  Input    :                                                              */\n/*      video       Video encoder data structure                            */\n/*      function    Approximate DCT function, scaling and threshold         */\n/*      ncoefblck   Array for last nonzero coeff for speedup in VlcEncode   */\n/*      QP      Combined offset from the origin to the current          */\n/*                  macroblock  and QP  for current MB.                     */\n/*    Output     :                                                          */\n/*      video->outputMB     Quantized DCT coefficients.                     
*/\n/*      currVop->yChan,uChan,vChan  Reconstructed pixels                    */\n/*                                                                          */\n/*  Return   :   PV_STATUS                                                  */\n/*  Modified :                                                              */\n/*           2/26/01\n            -modified threshold based on correlation coeff 0.75 only for mode H.263\n            -ncoefblck[] as input,  to keep position of last non-zero coeff*/\n/*           8/10/01\n            -modified threshold based on correlation coeff 0.5\n            -used column threshold to speedup column DCT.\n            -used bitmap zigzag to speedup RunLevel().                      */\n/* ======================================================================== */\n\nPV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])\n{\n    Int sad, k, CBP, mbnum = video->mbnum;\n    Short *output, *dataBlock;\n    UChar Mode = video->headerInfo.Mode[mbnum];\n    UChar *bitmapcol, *bitmaprow = video->bitmaprow;\n    UInt  *bitmapzz ;\n    UChar shortHeader = video->vol[video->currLayer]->shortVideoHeader;\n    Int dc_scaler = 8;\n    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n    struct QPstruct QuantParam;\n    Int dctMode, DctTh1;\n    Int ColTh;\n    Int(*BlockQuantDequantH263)(Short *, Short *, struct QPstruct *,\n                                UChar[], UChar *, UInt *, Int, Int, Int, UChar);\n    Int(*BlockQuantDequantH263DC)(Short *, Short *, struct QPstruct *,\n                                  UChar *, UInt *, Int, UChar);\n    void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);\n\n    /* motion comp. related var. 
*/\n    Vop *currVop = video->currVop;\n    VideoEncFrameIO *inputFrame = video->input;\n    Int ind_x = video->outputMB->mb_x;\n    Int ind_y = video->outputMB->mb_y;\n    Int lx = currVop->pitch;\n    Int width = currVop->width;\n    UChar *rec, *input, *pred;\n    Int offset = QP >> 5;  /* QP is combined offset and QP */\n    Int offsetc = (offset >> 2) + (ind_x << 2); /* offset for chrom */\n    /*****************************/\n\n    OSCL_UNUSED_ARG(function);\n\n    output = video->outputMB->block[0];\n    CBP = 0;\n    QP = QP & 0x1F;\n//  M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. to zero , 7/24/01*/\n\n    QuantParam.QPx2 = QP << 1;\n    QuantParam.QP = QP;\n    QuantParam.QPdiv2 = QP >> 1;\n    QuantParam.QPx2plus = QuantParam.QPx2 + QuantParam.QPdiv2;\n    QuantParam.Addition = QP - 1 + (QP & 0x1);\n\n    if (intra)\n    {\n        BlockDCT1x1 = &Block1x1DCTIntra;\n        BlockDCT2x2 = &Block2x2DCT_AANIntra;\n        BlockDCT4x4 = &Block4x4DCT_AANIntra;\n        BlockDCT8x8 = &BlockDCT_AANIntra;\n        BlockQuantDequantH263 = &BlockQuantDequantH263Intra;\n        BlockQuantDequantH263DC = &BlockQuantDequantH263DCIntra;\n        if (shortHeader)\n        {\n            dc_scaler = 8;\n        }\n        else\n        {\n            dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */\n        }\n        DctTh1 = (Int)(dc_scaler * 3);//*1.829\n        ColTh = ColThIntra[QP];\n    }\n    else\n    {\n        BlockDCT1x1 = &Block1x1DCTwSub;\n        BlockDCT2x2 = &Block2x2DCT_AANwSub;\n        BlockDCT4x4 = &Block4x4DCT_AANwSub;\n        BlockDCT8x8 = &BlockDCT_AANwSub;\n\n        BlockQuantDequantH263 = &BlockQuantDequantH263Inter;\n        BlockQuantDequantH263DC = &BlockQuantDequantH263DCInter;\n        ColTh = ColThInter[QP];\n        DctTh1 = (Int)(16 * QP);  //9*QP;\n    }\n\n    rec = currVop->yChan + offset;\n    input = inputFrame->yChan + offset;\n    if (lx != width) input -= (ind_y << 9);  /* non-padded 
offset */\n\n    dataBlock = video->dataBlock;\n    pred = video->predictedMB;\n\n    for (k = 0; k < 6; k++)\n    {\n        CBP <<= 1;\n        bitmapcol = video->bitmapcol[k];\n        bitmapzz = video->bitmapzz[k];  /*  7/30/01 */\n        if (k < 4)\n        {\n            sad = video->mot[mbnum][k+1].sad;\n            if (k&1)\n            {\n                rec += 8;\n                input += 8;\n            }\n            else if (k == 2)\n            {\n                dctMode = ((width << 3) - 8);\n                input += dctMode;\n                dctMode = ((lx << 3) - 8);\n                rec += dctMode;\n            }\n        }\n        else\n        {\n            if (k == 4)\n            {\n                rec = currVop->uChan + offsetc;\n                input = inputFrame->uChan + offsetc;\n                if (lx != width) input -= (ind_y << 7);\n                lx >>= 1;\n                width >>= 1;\n                if (intra)\n                {\n                    sad = getBlockSum(input, width);\n                    if (shortHeader)\n                        dc_scaler = 8;\n                    else\n                    {\n                        dc_scaler = cal_dc_scalerENC(QP, 2); /* chrominance blocks */\n                    }\n                    DctTh1 = (Int)(dc_scaler * 3);//*1.829\n                }\n                else\n                    sad = Sad8x8(input, pred, width);\n            }\n            else\n            {\n                rec = currVop->vChan + offsetc;\n                input = inputFrame->vChan + offsetc;\n                if (lx != width) input -= (ind_y << 7);\n                if (intra)\n                {\n                    sad = getBlockSum(input, width);\n                }\n                else\n                    sad = Sad8x8(input, pred, width);\n            }\n        }\n\n        if (sad < DctTh1 && !(shortHeader && intra)) /* all-zero */\n        {                       /* For shortHeader intra block, DC 
value cannot be zero */\n            dctMode = 0;\n            CBP |= 0;\n            ncoefblck[k] = 0;\n        }\n        else if (sad < 18*QP/*(QP<<4)*/) /* DC-only */\n        {\n            dctMode = 1;\n            BlockDCT1x1(dataBlock, input, pred, width);\n\n            CBP |= (*BlockQuantDequantH263DC)(dataBlock, output, &QuantParam,\n                                              bitmaprow + k, bitmapzz, dc_scaler, shortHeader);\n            ncoefblck[k] = 1;\n        }\n        else\n        {\n\n            dataBlock[64] = ColTh;\n\n            if (sad < 22*QP/*(QP<<4)+(QP<<1)*/)  /* 2x2 DCT */\n            {\n                dctMode = 2;\n                BlockDCT2x2(dataBlock, input, pred, width);\n                ncoefblck[k] = 6;\n            }\n            else if (sad < (QP << 5)) /* 4x4 DCT */\n            {\n                dctMode = 4;\n                BlockDCT4x4(dataBlock, input, pred, width);\n                ncoefblck[k] = 26;\n            }\n            else /* Full-DCT */\n            {\n                dctMode = 8;\n                BlockDCT8x8(dataBlock, input, pred, width);\n                ncoefblck[k] = 64;\n            }\n\n            CBP |= (*BlockQuantDequantH263)(dataBlock, output, &QuantParam,\n                                            bitmapcol, bitmaprow + k, bitmapzz, dctMode, k, dc_scaler, shortHeader);\n        }\n        BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k], dctMode, rec, pred, (lx << 1) | intra);\n        output += 64;\n        if (!(k&1))\n        {\n            pred += 8;\n        }\n        else\n        {\n            pred += 120;\n        }\n    }\n\n    video->headerInfo.CBP[mbnum] = CBP; /*  5/18/2001 */\n    return PV_SUCCESS;\n}\n\n#ifndef NO_MPEG_QUANT\n/* ======================================================================== */\n/*  Function : CodeMB_MPEG( )                                               */\n/*  Date     : 8/15/2001                                                    */\n/*  
Purpose  : Perform residue calc (only zero MV), DCT, MPEG Quant/Dequant,*/\n/*              IDCT and motion compensation.Modified from FastCodeMB()     */\n/*  Input    :                                                              */\n/*      video       Video encoder data structure                            */\n/*      function    Approximate DCT function, scaling and threshold         */\n/*      ncoefblck   Array for last nonzero coeff for speedup in VlcEncode   */\n/*      QP      Combined offset from the origin to the current          */\n/*                  macroblock  and QP  for current MB.                     */\n/*    Output     :                                                          */\n/*      video->outputMB     Quantized DCT coefficients.                     */\n/*      currVop->yChan,uChan,vChan  Reconstructed pixels                    */\n/*                                                                          */\n/*  Return   :   PV_STATUS                                                  */\n/*  Modified :                                                              */\n/*           2/26/01\n            -modified threshold based on correlation coeff 0.75 only for mode H.263\n            -ncoefblck[] as input, keep position of last non-zero coeff*/\n/*           8/10/01\n            -modified threshold based on correlation coeff 0.5\n            -used column threshold to speedup column DCT.\n            -used bitmap zigzag to speedup RunLevel().                      
*/\n/* ======================================================================== */\n\nPV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[])\n{\n    Int sad, k, CBP, mbnum = video->mbnum;\n    Short *output, *dataBlock;\n    UChar Mode = video->headerInfo.Mode[mbnum];\n    UChar *bitmapcol, *bitmaprow = video->bitmaprow;\n    UInt  *bitmapzz ;\n    Int dc_scaler = 8;\n    Vol *currVol = video->vol[video->currLayer];\n    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n    Int *qmat;\n    Int dctMode, DctTh1, DctTh2, DctTh3, DctTh4;\n    Int ColTh;\n\n    Int(*BlockQuantDequantMPEG)(Short *, Short *, Int, Int *,\n                                UChar [], UChar *, UInt *, Int,  Int, Int);\n    Int(*BlockQuantDequantMPEGDC)(Short *, Short *, Int, Int *,\n                                  UChar [], UChar *, UInt *, Int);\n\n    void (*BlockDCT1x1)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT2x2)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT4x4)(Short *, UChar *, UChar *, Int);\n    void (*BlockDCT8x8)(Short *, UChar *, UChar *, Int);\n\n    /* motion comp. related var. */\n    Vop *currVop = video->currVop;\n    VideoEncFrameIO *inputFrame = video->input;\n    Int ind_x = video->outputMB->mb_x;\n    Int ind_y = video->outputMB->mb_y;\n    Int lx = currVop->pitch;\n    Int width = currVop->width;\n    UChar *rec, *input, *pred;\n    Int offset = QP >> 5;\n    Int offsetc = (offset >> 2) + (ind_x << 2); /* offset for chrom */\n    /*****************************/\n\n    OSCL_UNUSED_ARG(function);\n\n    output = video->outputMB->block[0];\n    CBP = 0;\n    QP = QP & 0x1F;\n//  M4VENC_MEMSET(output,0,(sizeof(Short)<<6)*6); /* reset quantized coeff. 
to zero ,  7/24/01*/\n\n    if (intra)\n    {\n        BlockDCT1x1 = &Block1x1DCTIntra;\n        BlockDCT2x2 = &Block2x2DCT_AANIntra;\n        BlockDCT4x4 = &Block4x4DCT_AANIntra;\n        BlockDCT8x8 = &BlockDCT_AANIntra;\n\n        BlockQuantDequantMPEG = &BlockQuantDequantMPEGIntra;\n        BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCIntra;\n        dc_scaler = cal_dc_scalerENC(QP, 1); /* luminance blocks */\n        qmat = currVol->iqmat;\n        DctTh1 = (Int)(3 * dc_scaler);//2*dc_scaler);\n        DctTh2 = (Int)((1.25 * QP - 1) * qmat[1] * 0.45);//0.567);//0.567);\n        DctTh3 = (Int)((1.25 * QP - 1) * qmat[2] * 0.55);//1.162); /*  8/2/2001 */\n        DctTh4 = (Int)((1.25 * QP - 1) * qmat[32] * 0.8);//1.7583);//0.7942);\n        ColTh = ColThIntra[QP];\n    }\n    else\n    {\n        BlockDCT1x1 = &Block1x1DCTwSub;\n        BlockDCT2x2 = &Block2x2DCT_AANwSub;\n        BlockDCT4x4 = &Block4x4DCT_AANwSub;\n        BlockDCT8x8 = &BlockDCT_AANwSub;\n\n        BlockQuantDequantMPEG = &BlockQuantDequantMPEGInter;\n        BlockQuantDequantMPEGDC = &BlockQuantDequantMPEGDCInter;\n        qmat = currVol->niqmat;\n        DctTh1 = (Int)(((QP << 1) - 0.5) * qmat[0] * 0.4);//0.2286);//0.3062);\n        DctTh2 = (Int)(((QP << 1) - 0.5) * qmat[1] * 0.45);//0.567);//0.4);\n        DctTh3 = (Int)(((QP << 1) - 0.5) * qmat[2] * 0.55);//1.162); /*  8/2/2001 */\n        DctTh4 = (Int)(((QP << 1) - 0.5) * qmat[32] * 0.8);//1.7583);//0.7942);\n        ColTh = ColThInter[QP];\n    }// get qmat, DctTh1, DctTh2, DctTh3\n\n    rec = currVop->yChan + offset;\n    input = inputFrame->yChan + offset;\n    if (lx != width) input -= (ind_y << 9);  /* non-padded offset */\n\n    dataBlock = video->dataBlock;\n    pred = video->predictedMB;\n\n    for (k = 0; k < 6; k++)\n    {\n        CBP <<= 1;\n        bitmapcol = video->bitmapcol[k];\n        bitmapzz = video->bitmapzz[k];  /*  8/2/01 */\n        if (k < 4)\n        {//Y block\n            sad = 
video->mot[mbnum][k+1].sad;\n            if (k&1)\n            {\n                rec += 8;\n                input += 8;\n            }\n            else if (k == 2)\n            {\n                dctMode = ((width << 3) - 8);\n                input += dctMode;\n                dctMode = ((lx << 3) - 8);\n                rec += dctMode;\n            }\n        }\n        else\n        {// U, V block\n            if (k == 4)\n            {\n                rec = currVop->uChan + offsetc;\n                input = inputFrame->uChan + offsetc;\n                if (lx != width) input -= (ind_y << 7);\n                lx >>= 1;\n                width >>= 1;\n                if (intra)\n                {\n                    dc_scaler = cal_dc_scalerENC(QP, 2); /* luminance blocks */\n                    DctTh1 = dc_scaler * 3;\n                    sad = getBlockSum(input, width);\n                }\n                else\n                    sad = Sad8x8(input, pred, width);\n            }\n            else\n            {\n                rec = currVop->vChan + offsetc;\n                input = inputFrame->vChan + offsetc;\n                if (lx != width) input -= (ind_y << 7);\n                if (intra)\n                    sad = getBlockSum(input, width);\n                else\n                    sad = Sad8x8(input, pred, width);\n            }\n        }\n\n        if (sad < DctTh1) /* all-zero */\n        {\n            dctMode = 0;\n            CBP |= 0;\n            ncoefblck[k] = 0;\n        }\n        else if (sad < DctTh2) /* DC-only */\n        {\n            dctMode = 1;\n            BlockDCT1x1(dataBlock, input, pred, width);\n\n            CBP |= (*BlockQuantDequantMPEGDC)(dataBlock, output, QP, qmat,\n                                              bitmapcol, bitmaprow + k, bitmapzz, dc_scaler);\n            ncoefblck[k] = 1;\n        }\n        else\n        {\n            dataBlock[64] = ColTh;\n\n            if (sad < DctTh3) /* 2x2-DCT */\n            
{\n                dctMode = 2;\n                BlockDCT2x2(dataBlock, input, pred, width);\n                ncoefblck[k] = 6;\n            }\n            else if (sad < DctTh4) /* 4x4 DCT */\n            {\n                dctMode = 4;\n                BlockDCT4x4(dataBlock, input, pred, width);\n                ncoefblck[k] = 26;\n            }\n            else /* full-DCT */\n            {\n                dctMode = 8;\n                BlockDCT8x8(dataBlock, input, pred, width);\n                ncoefblck[k] = 64;\n            }\n\n            CBP |= (*BlockQuantDequantMPEG)(dataBlock, output, QP, qmat,\n                                            bitmapcol, bitmaprow + k, bitmapzz, dctMode, k, dc_scaler); //\n        }\n        dctMode = 8; /* for mismatch handle */\n        BlockIDCTMotionComp(dataBlock, bitmapcol, bitmaprow[k], dctMode, rec, pred, (lx << 1) | (intra));\n\n        output += 64;\n        if (!(k&1))\n        {\n            pred += 8;\n        }\n        else\n        {\n            pred += 120;\n        }\n    }\n\n    video->headerInfo.CBP[mbnum] = CBP; /*  5/18/2001 */\n    return PV_SUCCESS;\n}\n\n#endif\n\n/* ======================================================================== */\n/*  Function : getBlockSAV( )                                               */\n/*  Date     : 8/10/2000                                                    */\n/*  Purpose  : Get SAV for one block                                        */\n/*  In/out   : block[64] contain one block data                             */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n/* can be written in MMX or SSE,  2/22/2001 */\nInt getBlockSAV(Short block[])\n{\n    Int i, val, sav = 0;\n\n    i = 8;\n    while (i--)\n    {\n        val = *block++;\n        if (val > 0)    sav += 
val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n        val = *block++;\n        if (val > 0)    sav += val;\n        else        sav -= val;\n    }\n\n    return sav;\n\n}\n\n/* ======================================================================== */\n/*  Function : Sad8x8( )                                                    */\n/*  Date     : 8/10/2000                                                    */\n/*  Purpose  : Find SAD between prev block and current block                */\n/*  In/out   : Previous and current frame block pointers, and frame width   */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*      8/15/01,  - do 4 pixel at a time    assuming 32 bit register        */\n/* ======================================================================== */\nInt Sad8x8(UChar *cur, UChar *prev, Int width)\n{\n    UChar *end = cur + (width << 3);\n    Int sad = 0;\n    Int *curInt = (Int*) cur;\n    Int *prevInt = (Int*) prev;\n    Int cur1, cur2, prev1, prev2;\n    UInt mask, sgn_msk = 0x80808080;\n    Int  sum2 = 0, sum4 = 0;\n    Int  tmp;\n    do\n    {\n        mask    = ~(0xFF00);\n        cur1    = curInt[1];        /* load cur[4..7] */\n        cur2    = curInt[0];\n        curInt += (width >> 2);     /* load cur[0..3] and +=lx */\n        prev1   = prevInt[1];\n        prev2   = 
prevInt[0];\n        prevInt += 4;\n\n        tmp     = prev2 ^ cur2;\n        cur2    = prev2 - cur2;\n        tmp     = tmp ^ cur2;       /* (^)^(-) last bit is one if carry */\n        tmp     = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */\n        if (cur2 < 0)   tmp = tmp | 0x80000000; /* corcurt sign of first byte */\n        tmp     = (tmp << 8) - tmp;     /* carry borrowed bytes are marked with 0x1FE */\n        cur2    = cur2 + (tmp >> 7);     /* negative bytes is added with 0xFF, -1 */\n        cur2    = cur2 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */\n\n        tmp     = prev1 ^ cur1;\n        cur1    = prev1 - cur1;\n        tmp     = tmp ^ cur1;       /* (^)^(-) last bit is one if carry */\n        tmp     = sgn_msk & ((UInt)tmp >> 1); /* check the sign of each byte */\n        if (cur1 < 0)   tmp = tmp | 0x80000000; /* corcurt sign of first byte */\n        tmp     = (tmp << 8) - tmp;     /* carry borrowed bytes are marked with 0x1FE */\n        cur1    = cur1 + (tmp >> 7);     /* negative bytes is added with 0xFF, -1 */\n        cur1    = cur1 ^(tmp >> 7); /* take absolute by inverting bits (EOR) */\n\n        sum4    = sum4 + cur1;\n        cur1    = cur1 & (mask << 8);   /* mask first and third bytes */\n        sum2    = sum2 + ((UInt)cur1 >> 8);\n        sum4    = sum4 + cur2;\n        cur2    = cur2 & (mask << 8);   /* mask first and third bytes */\n        sum2    = sum2 + ((UInt)cur2 >> 8);\n    }\n    while ((UInt)curInt < (UInt)end);\n\n    cur1 = sum4 - (sum2 << 8);  /* get even-sum */\n    cur1 = cur1 + sum2;         /* add 16 bit even-sum and odd-sum*/\n    cur1 = cur1 + (cur1 << 16); /* add upper and lower 16 bit sum */\n    sad  = ((UInt)cur1 >> 16);  /* take upper 16 bit */\n    return sad;\n}\n\n/* ======================================================================== */\n/*  Function : getBlockSum( )                                               */\n/*  Date     : 8/10/2000                             
                       */\n/*  Purpose  : Find summation of value within a block.                      */\n/*  In/out   : Pointer to current block in a frame and frame width          */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*          8/15/01,  - SIMD 4 pixels at a time                         */\n/* ======================================================================== */\n\nInt getBlockSum(UChar *cur, Int width)\n{\n    Int sad = 0, sum4 = 0, sum2 = 0;\n    UChar *end = cur + (width << 3);\n    Int *curInt = (Int*)cur;\n    UInt mask   = ~(0xFF00);\n    Int load1, load2;\n\n    do\n    {\n        load1 = curInt[1];\n        load2 = curInt[0];\n        curInt += (width >> 2);\n        sum4 += load1;\n        load1 = load1 & (mask << 8); /* even bytes */\n        sum2 += ((UInt)load1 >> 8); /* sum even bytes, 16 bit */\n        sum4 += load2;\n        load2 = load2 & (mask << 8); /* even bytes */\n        sum2 += ((UInt)load2 >> 8); /* sum even bytes, 16 bit */\n    }\n    while ((UInt)curInt < (UInt)end);\n    load1 = sum4 - (sum2 << 8);     /* get even-sum */\n    load1 = load1 + sum2;           /* add 16 bit even-sum and odd-sum*/\n    load1 = load1 + (load1 << 16);  /* add upper and lower 16 bit sum */\n    sad  = ((UInt)load1 >> 16); /* take upper 16 bit */\n\n    return sad;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/fastcodemb.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*  =====================================================================   */\n/*  File: FastCodeMB.h                                                      */\n/*  Description: This file contains structure and function prototypes used\n            in FastCodeMB() function. When it is decided to use FastCodeMB\n            instead of CodeMB, all of this prototypes should be migrated to\n            mp4enc_lib.h.                                                   
*/\n/*  Rev:                                                                    */\n/*  Created: 8/14/01                                                        */\n/* //////////////////////////////////////////////////////////////////////// */\n\ntypedef struct struct_approxDCT  approxDCT;\nstruct struct_approxDCT\n{\n    const Int *scale;\n    Int(*DCT)(Int block[ ], Int coeff[ ], approxDCT *);\n\n    // Threshold value for H.263 Quantizer\n    Int th_app_all[8];\n    Int th_app_odd[8];\n    Int th_app_even[8];\n    Int th_app_even1[8];\n    Int th_app_even2[8];\n};\n\nstruct QPstruct\n{\n    Int QPx2 ;\n    Int QP;\n    Int QPdiv2;\n    Int QPx2plus;\n    Int Addition;\n};\n\n/*---- FastCodeMB.c -----*/\nvoid initCodeMB(approxDCT *function, Int QP);\nPV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset);\nPV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int QP, Int ncoefblck[], Int offset);\nInt getBlockSAV(Int block[]);\nInt Sad8x8(UChar *rec, UChar *prev, Int lx);\nInt getBlockSum(UChar *rec, Int lx);\n\n/*---- AppVCA_dct.c -----*/\nInt     AppVCA1_dct(Int block[], Int out[ ], approxDCT *function);\nInt     AppVCA2_dct(Int block[], Int out[ ], approxDCT *function);\nInt     AppVCA3_dct(Int block[], Int out[ ], approxDCT *function);\nInt     AppVCA4_dct(Int block[], Int out[ ], approxDCT *function);\nInt     AppVCA5_dct(Int block[], Int out[ ], approxDCT *function);\n\n/*---- FastQuant.c -----*/\nInt cal_dc_scalerENC(Int QP, Int type) ;\nInt BlockQuantDequantH263Inter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int dummy);\n\nInt BlockQuantDequantH263Intra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int 
dc_scaler);\n\nInt BlockQuantDequantH263DCInter(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,\n                                 UChar *bitmaprow, UInt *bitmapzz, Int dummy);\n\nInt BlockQuantDequantH263DCIntra(Int *rcoeff, Int *qcoeff, struct QPstruct *QuantParam,\n                                 UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);\n\nInt BlockQuantDequantMPEGInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int DctMode, Int comp, Int dc_scaler);\n\nInt BlockQuantDequantMPEGIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int DctMode, Int comp, Int dc_scaler);\n\nInt BlockQuantDequantMPEGDCInter(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,\n                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy);\n\nInt BlockQuantDequantMPEGDCIntra(Int *rcoeff, Int *qcoeff, Int QP, Int *qmat,\n                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);\n\n/*---- FastIDCT.c -----*/\nvoid BlockIDCTMotionComp(Int *block, UChar *bitmapcol, UChar bitmaprow,\n                         Int dctMode, UChar *rec, Int lx, Int intra);\n\n/*---- motion_comp.c -----*/\nvoid PutSkippedBlock(UChar *rec, UChar *prev, Int lx);\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/fastidct.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_base_macros.h\" // for OSCL_UNUSED_ARG\n#include \"mp4def.h\"\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"dct.h\"\n\n#define ADD_CLIP    { \\\n            tmp = *rec + tmp; \\\n        if((UInt)tmp > mask) tmp = mask&(~(tmp>>31)); \\\n        *rec++ = tmp;   \\\n        }\n\n#define INTRA_CLIP  { \\\n        if((UInt)tmp > mask) tmp = mask&(~(tmp>>31)); \\\n        *rec++ = tmp;   \\\n        }\n\n\n#define CLIP_RESULT(x)      if((UInt)x > 0xFF){x = 0xFF & (~(x>>31));}\n#define ADD_AND_CLIP1(x)    x += (pred_word&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP2(x)    x += ((pred_word>>8)&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP3(x)    x += ((pred_word>>16)&0xFF); CLIP_RESULT(x);\n#define ADD_AND_CLIP4(x)    x += ((pred_word>>24)&0xFF); CLIP_RESULT(x);\n\n\nvoid idct_col0(Short *blk)\n{\n    OSCL_UNUSED_ARG(blk);\n\n    return;\n}\n\nvoid idct_col1(Short *blk)\n{\n    blk[0] = blk[8] = blk[16] = blk[24] = blk[32] = blk[40] = blk[48] = blk[56] =\n                                              blk[0] << 3;\n    return ;\n}\n\nvoid idct_col2(Short *blk)\n{\n    int32 x0, x1, x3, x5, x7;//, x8;\n\n    x1 = blk[8];\n    x0 = ((int32)blk[0] << 11) + 128;\n    /* both 
upper and lower*/\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n\n    x3 = x7;\n    x5 = (181 * (x1 - x7) + 128) >> 8;\n    x7 = (181 * (x1 + x7) + 128) >> 8;\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x0 + x7) >> 8;\n    blk[16] = (x0 + x5) >> 8;\n    blk[24] = (x0 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x0 - x7) >> 8;\n    blk[40] = (x0 - x5) >> 8;\n    blk[32] = (x0 - x3) >> 8;\n    return ;\n}\n\nvoid idct_col3(Short *blk)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n\n    x2 = blk[16];\n    x1 = blk[8];\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    x4 = x0;\n    x6 = W6 * x2;\n    x2 = W2 * x2;\n    x8 = x0 - x2;\n    x0 += x2;\n    x2 = x8;\n    x8 = x4 - x6;\n    x4 += x6;\n    x6 = x8;\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n    x3 = x7;\n    x5 = (181 * (x1 - x7) + 128) >> 8;\n    x7 = (181 * (x1 + x7) + 128) >> 8;\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x4 + x7) >> 8;\n    blk[16] = (x6 + x5) >> 8;\n    blk[24] = (x2 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x4 - x7) >> 8;\n    blk[40] = (x6 - x5) >> 8;\n    blk[32] = (x2 - x3) >> 8;\n    return ;\n}\n\nvoid idct_col4(Short *blk)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    x2 = blk[16];\n    x1 = blk[8];\n    x3 = blk[24];\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    x4 = x0;\n    x6 = W6 * x2;\n    x2 = W2 * x2;\n    x8 = x0 - x2;\n    x0 += x2;\n    x2 = x8;\n    x8 = x4 - x6;\n    x4 += x6;\n    x6 = x8;\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n    x5 = W3 * x3;\n    x3 = -W5 * x3;\n    x8 = x1 - x5;\n    x1 += x5;\n    x5 = x8;\n    x8 = x7 - x3;\n    x3 += x7;\n    x7 = (181 * (x5 + x8) + 128) >> 8;\n    x5 = (181 * (x5 - x8) + 128) >> 8;\n\n\n    blk[0] = (x0 + x1) >> 8;\n    blk[8] = (x4 + x7) >> 8;\n    blk[16] = (x6 + x5) >> 8;\n    blk[24] = (x2 + x3) >> 8;\n    blk[56] = (x0 - x1) >> 8;\n    blk[48] = (x4 - x7) >> 8;\n    blk[40] = (x6 - x5) >> 8;\n    blk[32] = (x2 - x3) >> 8;\n    return ;\n}\n\n#ifndef SMALL_DCT\nvoid 
idct_col0x40(Short *blk)\n{\n    int32 x1, x3, x5, x7;//, x8;\n\n    x1 = blk[8];\n    /* both upper and lower*/\n\n    x7 = W7 * x1;\n    x1 = W1 * x1;\n\n    x3 = x7;\n    x5 = (181 * (x1 - x7) + 128) >> 8;\n    x7 = (181 * (x1 + x7) + 128) >> 8;\n\n    blk[0] = (128 + x1) >> 8;\n    blk[8] = (128 + x7) >> 8;\n    blk[16] = (128 + x5) >> 8;\n    blk[24] = (128 + x3) >> 8;\n    blk[56] = (128 - x1) >> 8;\n    blk[48] = (128 - x7) >> 8;\n    blk[40] = (128 - x5) >> 8;\n    blk[32] = (128 - x3) >> 8;\n\n    return ;\n}\n\nvoid idct_col0x20(Short *blk)\n{\n    int32 x0, x2, x4, x6;\n\n    x2 = blk[16];\n    x6 = W6 * x2;\n    x2 = W2 * x2;\n    x0 = 128 + x2;\n    x2 = 128 - x2;\n    x4 = 128 + x6;\n    x6 = 128 - x6;\n\n    blk[0] = (x0) >> 8;\n    blk[56] = (x0) >> 8;\n    blk[8] = (x4) >> 8;\n    blk[48] = (x4) >> 8;\n    blk[16] = (x6) >> 8;\n    blk[40] = (x6) >> 8;\n    blk[24] = (x2) >> 8;\n    blk[32] = (x2) >> 8;\n\n    return ;\n}\n\nvoid idct_col0x10(Short *blk)\n{\n    int32 x1, x3, x5,  x7;\n\n    x3 = blk[24];\n    x1 = W3 * x3;\n    x3 = W5 * x3;\n\n    x7 = (181 * (x3 - x1) + 128) >> 8;\n    x5 = (-181 * (x1 + x3) + 128) >> 8;\n\n\n    blk[0] = (128 + x1) >> 8;\n    blk[8] = (128 + x7) >> 8;\n    blk[16] = (128 + x5) >> 8;\n    blk[24] = (128 - x3) >> 8;\n    blk[56] = (128 - x1) >> 8;\n    blk[48] = (128 - x7) >> 8;\n    blk[40] = (128 - x5) >> 8;\n    blk[32] = (128 + x3) >> 8;\n\n    return ;\n}\n\n#endif /* SMALL_DCT */\n\nvoid idct_col(Short *blk)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n\n    x1 = (int32)blk[32] << 11;\n    x2 = blk[48];\n    x3 = blk[16];\n    x4 = blk[8];\n    x5 = blk[56];\n    x6 = blk[40];\n    x7 = blk[24];\n    x0 = ((int32)blk[0] << 11) + 128;\n\n    /* first stage */\n    x8 = W7 * (x4 + x5);\n    x4 = x8 + (W1 - W7) * x4;\n    x5 = x8 - (W1 + W7) * x5;\n    x8 = W3 * (x6 + x7);\n    x6 = x8 - (W3 - W5) * x6;\n    x7 = x8 - (W3 + W5) * x7;\n\n    /* second stage */\n    x8 = x0 + x1;\n    x0 -= x1;\n    x1 = 
W6 * (x3 + x2);\n    x2 = x1 - (W2 + W6) * x2;\n    x3 = x1 + (W2 - W6) * x3;\n    x1 = x4 + x6;\n    x4 -= x6;\n    x6 = x5 + x7;\n    x5 -= x7;\n\n    /* third stage */\n    x7 = x8 + x3;\n    x8 -= x3;\n    x3 = x0 + x2;\n    x0 -= x2;\n    x2 = (181 * (x4 + x5) + 128) >> 8;\n    x4 = (181 * (x4 - x5) + 128) >> 8;\n\n    /* fourth stage */\n    blk[0]    = (x7 + x1) >> 8;\n    blk[8] = (x3 + x2) >> 8;\n    blk[16] = (x0 + x4) >> 8;\n    blk[24] = (x8 + x6) >> 8;\n    blk[32] = (x8 - x6) >> 8;\n    blk[40] = (x0 - x4) >> 8;\n    blk[48] = (x3 - x2) >> 8;\n    blk[56] = (x7 - x1) >> 8;\n\n    return ;\n}\n\n/* This function should not be called at all ****/\nvoid idct_row0Inter(Short *srce, UChar *rec, Int lx)\n{\n    OSCL_UNUSED_ARG(srce);\n\n    OSCL_UNUSED_ARG(rec);\n\n    OSCL_UNUSED_ARG(lx);\n\n    return;\n}\n\nvoid idct_row1Inter(Short *blk, UChar *rec, Int lx)\n{\n    int tmp;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        tmp = (*(blk += 8) + 32) >> 6;\n        *blk = 0;\n\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp + ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp 
+ ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return;\n}\n\nvoid idct_row2Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x4, x5;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 + x5) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (x0 - x5) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x1) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x4) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes 
to dst */\n    }\n    return ;\n}\n\nvoid idct_row3Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x3 = x7;\n        x5 = (181 * (x1 - x7) + 128) >> 8;\n        x7 = (181 * (x1 + x7) + 128) >> 8;\n\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst 
*/\n    }\n\n    return ;\n}\n\nvoid idct_row4Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x3 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x5 = (W3 * x3 + 4) >> 3;\n        x3 = (- W5 * x3 + 4) >> 3;\n        x8 = x1 - x5;\n        x1 += x5;\n        x5 = x8;\n        x8 = x7 - x3;\n        x3 += x7;\n        x7 = (181 * (x5 + x8) + 128) >> 8;\n        x5 = (181 * (x5 - x8) + 128) >> 8;\n\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n       
 ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\n#ifndef SMALL_DCT\nvoid idct_row0x40Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x1, x2, x4, x5;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[1];\n        blk[1] = 0;\n        blk += 8;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (8192 + x4) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 + x1) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 + x5) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (8192 - x5) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 - x1) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 - x4) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\nvoid idct_row0x20Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x2, x4, x6;\n    int i = 8;\n    uint32 pred_word, 
dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n\n    while (i--)\n    {\n        x2 = blk[2];\n        blk[2] = 0;\n        blk += 8; /* for proper rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x0 = 8192 + x2;\n        x2 = 8192 - x2;\n        x4 = 8192 + x6;\n        x6 = 8192 - x6;\n\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (x0) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (x2) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n\n    return ;\n}\n\nvoid idct_row0x10Inter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x1, x3, x5, x7;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n\n    while (i--)\n    {\n        x3 = blk[3];\n        blk[3] = 0;\n        blk += 8;\n\n        x1 = (W3 * x3 + 4) >> 3;\n        x3 = (-W5 * x3 + 4) >> 3;\n\n        x7 = (-181 * (x3 + x1) + 128) >> 8;\n        x5 = (181 * (x3 - x1) + 128) 
>> 8;\n\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n        res = (8192 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n        res = (8192 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\n#endif /* SMALL_DCT */\n\nvoid idct_rowInter(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        x1 = (int32)blk[12] << 8;\n        blk[12] = 0;\n        x2 = blk[14];\n        blk[14] = 0;\n        x3 = blk[10];\n        blk[10] = 0;\n        x4 = blk[9];\n        blk[9] = 0;\n        x5 = blk[15];\n        blk[15] = 0;\n        x6 = blk[13];\n        blk[13] = 0;\n        x7 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x8 = W7 * (x4 + x5) + 4;\n        x4 = (x8 + (W1 - W7) * x4) >> 3;\n        x5 = (x8 - (W1 + W7) * x5) >> 3;\n        x8 = W3 * (x6 + x7) + 
4;\n        x6 = (x8 - (W3 - W5) * x6) >> 3;\n        x7 = (x8 - (W3 + W5) * x7) >> 3;\n\n        /* second stage */\n        x8 = x0 + x1;\n        x0 -= x1;\n        x1 = W6 * (x3 + x2) + 4;\n        x2 = (x1 - (W2 + W6) * x2) >> 3;\n        x3 = (x1 + (W2 - W6) * x3) >> 3;\n        x1 = x4 + x6;\n        x4 -= x6;\n        x6 = x5 + x7;\n        x5 -= x7;\n\n        /* third stage */\n        x7 = x8 + x3;\n        x8 -= x3;\n        x3 = x0 + x2;\n        x0 -= x2;\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x4 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(rec += lx)); /* read 4 bytes from pred */\n\n        res = (x7 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x3 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x8 + x6) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)rec) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(rec + 4)); /* read 4 bytes from pred */\n\n        res = (x8 - x6) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x4) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x3 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x7 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return;\n}\n\nvoid idct_row0Intra(Short *srce, UChar *rec, Int lx)\n{\n    OSCL_UNUSED_ARG(srce);\n\n    OSCL_UNUSED_ARG(rec);\n\n    OSCL_UNUSED_ARG(lx);\n\n    return;\n}\n\nvoid idct_row1Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 tmp;\n    int i = 8;\n\n    rec -= lx;\n    blk -= 8;\n    while (i--)\n    {\n        tmp = ((*(blk += 8) + 32) >> 6);\n        *blk = 0;\n        
CLIP_RESULT(tmp)\n\n        tmp |= (tmp << 8);\n        tmp |= (tmp << 16);\n        *((uint32*)(rec += lx)) = tmp;\n        *((uint32*)(rec + 4)) = tmp;\n    }\n    return;\n}\n\nvoid idct_row2Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x4, x5;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n    blk -= 8;\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        res = ((x0 + x4) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x0 + x2) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x0 + x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x0 + x5) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((x0 - x5) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x0 - x1) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x0 - x2) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x0 - x4) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n    }\n    return ;\n}\n\nvoid idct_row3Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n    blk -= 8;\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;/* for proper 
rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x3 = x7;\n        x5 = (181 * (x1 - x7) + 128) >> 8;\n        x7 = (181 * (x1 + x7) + 128) >> 8;\n\n        res = ((x0 + x1) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x4 + x7) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x6 + x5) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x2 + x3) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((x2 - x3) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x6 - x5) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x4 - x7) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x0 - x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n\n    }\n    return ;\n}\n\nvoid idct_row4Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n    blk -= 8;\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x3 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0; /* for proper rounding in the fourth stage */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 
+ 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x5 = (W3 * x3 + 4) >> 3;\n        x3 = (- W5 * x3 + 4) >> 3;\n        x8 = x1 - x5;\n        x1 += x5;\n        x5 = x8;\n        x8 = x7 - x3;\n        x3 += x7;\n        x7 = (181 * (x5 + x8) + 128) >> 8;\n        x5 = (181 * (x5 - x8) + 128) >> 8;\n\n        res = ((x0 + x1) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x4 + x7) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x6 + x5) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x2 + x3) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((x2 - x3) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x6 - x5) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x4 - x7) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x0 - x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n    }\n\n    return ;\n}\n\n#ifndef SMALL_DCT\nvoid idct_row0x40Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32  x1, x2, x4, x5;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[1];\n        blk[1] = 0;\n        blk += 8;\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        res = ((8192 + x4) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((8192 + x2) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((8192 + x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((8192 + x5) >> 14);\n        CLIP_RESULT(res)\n        
dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((8192 - x5) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((8192 - x1) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((8192 - x2) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((8192 - x4) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n\n    }\n    return ;\n}\n\nvoid idct_row0x20Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x2, x4, x6;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n    while (i--)\n    {\n        x2 = blk[2];\n        blk[2] = 0;\n        blk += 8;\n\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x0 = 8192 + x2;\n        x2 = 8192 - x2;\n        x4 = 8192 + x6;\n        x6 = 8192 - x6;\n\n        res = ((x0) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x4) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x6) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x2) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((x2) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x6) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((x4) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x0) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n\n    }\n    return ;\n}\n\nvoid idct_row0x10Intra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x1, x3, x5, x7;\n    int res, res2;\n    uint32 dst_word;\n    int i = 8;\n\n    rec -= lx;\n    while (i--)\n    {\n        x3 = blk[3];\n        blk[3] = 0 
;\n        blk += 8;\n\n        x1 = (W3 * x3 + 4) >> 3;\n        x3 = (W5 * x3 + 4) >> 3;\n\n        x7 = (181 * (x3 - x1) + 128) >> 8;\n        x5 = (-181 * (x1 + x3) + 128) >> 8;\n\n        res = ((8192 + x1) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((8192 + x7) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((8192 + x5) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((8192 - x3) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((8192 + x3) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((8192 - x5) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = (res2 << 8) | res;\n        res = ((8192 - x7) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((8192 - x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n\n    }\n\n    return ;\n}\n\n#endif /* SMALL_DCT */\nvoid idct_rowIntra(Short *blk, UChar *rec, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    int res, res2;\n    uint32 dst_word;\n\n    blk -= 8;\n    rec -= lx;\n\n    while (i--)\n    {\n        x1 = (int32)blk[12] << 8;\n        blk[12] = 0;\n        x2 = blk[14];\n        blk[14] = 0;\n        x3 = blk[10];\n        blk[10] = 0;\n        x4 = blk[9];\n        blk[9] = 0;\n        x5 = blk[15];\n        blk[15] = 0;\n        x6 = blk[13];\n        blk[13] = 0;\n        x7 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x8 = W7 * (x4 + x5) + 4;\n        x4 = (x8 + (W1 - W7) * x4) >> 3;\n        x5 = (x8 - (W1 + W7) * x5) >> 3;\n        x8 = W3 * (x6 + x7) + 4;\n        x6 = (x8 - (W3 - W5) * x6) >> 3;\n        x7 = (x8 - (W3 + W5) * x7) >> 3;\n\n        /* second stage */\n        
x8 = x0 + x1;\n        x0 -= x1;\n        x1 = W6 * (x3 + x2) + 4;\n        x2 = (x1 - (W2 + W6) * x2) >> 3;\n        x3 = (x1 + (W2 - W6) * x3) >> 3;\n        x1 = x4 + x6;\n        x4 -= x6;\n        x6 = x5 + x7;\n        x5 -= x7;\n\n        /* third stage */\n        x7 = x8 + x3;\n        x8 -= x3;\n        x3 = x0 + x2;\n        x0 -= x2;\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x4 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        res = ((x7 + x1) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x3 + x2) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = res | (res2 << 8);\n        res = ((x0 + x4) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x8 + x6) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word;\n\n        res = ((x8 - x6) >> 14);\n        CLIP_RESULT(res)\n        res2 = ((x0 - x4) >> 14);\n        CLIP_RESULT(res2)\n        dst_word = res | (res2 << 8);\n        res = ((x3 - x2) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 16);\n        res = ((x7 - x1) >> 14);\n        CLIP_RESULT(res)\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word;\n    }\n    return;\n}\n\n\n/* This function should not be called at all ****/\nvoid idct_row0zmv(Short *srce, UChar *rec, UChar *pred, Int lx)\n{\n    OSCL_UNUSED_ARG(srce);\n    OSCL_UNUSED_ARG(rec);\n    OSCL_UNUSED_ARG(pred);\n    OSCL_UNUSED_ARG(lx);\n\n    return;\n}\n\nvoid idct_row1zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int tmp;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    pred -= 16;\n    rec -= lx;\n    blk -= 8;\n\n    while (i--)\n    {\n        tmp = (*(blk += 8) + 32) >> 6;\n        *blk = 0;\n\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = 
tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp + ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = tmp + (pred_word & 0xFF);\n        CLIP_RESULT(res);\n        res2 = tmp + ((pred_word >> 8) & 0xFF);\n        CLIP_RESULT(res2);\n        dst_word = (res2 << 8) | res;\n        res = tmp + ((pred_word >> 16) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 16);\n        res = tmp + ((pred_word >> 24) & 0xFF);\n        CLIP_RESULT(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return;\n}\n\nvoid idct_row2zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x0, x1, x2, x4, x5;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n    blk -= 8;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        
dst_word = (res2 << 8) | res;\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 + x5) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = (x0 - x5) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x1) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x4) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\nvoid idct_row3zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;  /* for proper rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x3 = x7;\n        x5 = (181 * (x1 - x7) + 128) >> 8;\n        x7 = (181 * (x1 + x7) + 128) >> 8;\n\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        
ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n\n    return ;\n}\n\nvoid idct_row4zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n    blk -= 8;\n\n    while (i--)\n    {\n        x2 = blk[10];\n        blk[10] = 0;\n        x1 = blk[9];\n        blk[9] = 0;\n        x3 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n\n        x4 = x0;\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x8 = x0 - x2;\n        x0 += x2;\n        x2 = x8;\n        x8 = x4 - x6;\n        x4 += x6;\n        x6 = x8;\n\n        x7 = (W7 * x1 + 4) >> 3;\n        x1 = (W1 * x1 + 4) >> 3;\n        x5 = (W3 * x3 + 4) >> 3;\n        x3 = (- W5 * x3 + 4) >> 3;\n        x8 = x1 - x5;\n        x1 += x5;\n        x5 = x8;\n        x8 = x7 - x3;\n        x3 += x7;\n        x7 = (181 * (x5 + x8) + 128) >> 8;\n        x5 = (181 * (x5 - x8) + 128) >> 8;\n\n        pred_word 
= *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (x0 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = (x2 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\n#ifndef SMALL_DCT\nvoid idct_row0x40zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x1, x2, x4, x5;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n\n    while (i--)\n    {\n        /* shortcut */\n        x4 = blk[1];\n        blk[1] = 0;\n        blk += 8;  /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x5 = (W7 * x4 + 4) >> 3;\n        x4 = (W1 * x4 + 4) >> 3;\n\n        /* third stage */\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x1 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (8192 + x4) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 + x1) >> 14;\n   
     ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 + x5) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = (8192 - x5) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 - x1) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 - x4) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return ;\n}\n\nvoid idct_row0x20zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x0, x2, x4, x6;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n\n    while (i--)\n    {\n        x2 = blk[2];\n        blk[2] = 0;\n        blk += 8; /* for proper rounding in the fourth stage */\n        /* both upper and lower*/\n        /* both x2orx6 and x0orx4 */\n        x6 = (W6 * x2 + 4) >> 3;\n        x2 = (W2 * x2 + 4) >> 3;\n        x0 = 8192 + x2;\n        x2 = 8192 - x2;\n        x4 = 8192 + x6;\n        x6 = 8192 - x6;\n\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (x0) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x4) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x6) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x2) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = 
(x2) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x6) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x4) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x0) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n\n    return ;\n}\n\nvoid idct_row0x10zmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x1, x3, x5, x7;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n\n    while (i--)\n    {\n        x3 = blk[3];\n        blk[3] = 0;\n        blk += 8;\n\n        x1 = (W3 * x3 + 4) >> 3;\n        x3 = (-W5 * x3 + 4) >> 3;\n\n        x7 = (-181 * (x3 + x1) + 128) >> 8;\n        x5 = (181 * (x3 - x1) + 128) >> 8;\n\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n        res = (8192 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 + x7) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 + x5) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 + x3) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n        res = (8192 - x3) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (8192 - x5) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (8192 - x7) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (8192 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return 
;\n}\n\n#endif /* SMALL_DCT */\n\nvoid idct_rowzmv(Short *blk, UChar *rec, UChar *pred, Int lx)\n{\n    int32 x0, x1, x2, x3, x4, x5, x6, x7, x8;\n    int i = 8;\n    uint32 pred_word, dst_word;\n    int res, res2;\n\n    /* preset the offset, such that we can take advantage pre-offset addressing mode   */\n    rec -= lx;\n    pred -= 16;\n    blk -= 8;\n\n    while (i--)\n    {\n        x1 = (int32)blk[12] << 8;\n        blk[12] = 0;\n        x2 = blk[14];\n        blk[14] = 0;\n        x3 = blk[10];\n        blk[10] = 0;\n        x4 = blk[9];\n        blk[9] = 0;\n        x5 = blk[15];\n        blk[15] = 0;\n        x6 = blk[13];\n        blk[13] = 0;\n        x7 = blk[11];\n        blk[11] = 0;\n        x0 = ((*(blk += 8)) << 8) + 8192;\n        *blk = 0;   /* for proper rounding in the fourth stage */\n\n        /* first stage */\n        x8 = W7 * (x4 + x5) + 4;\n        x4 = (x8 + (W1 - W7) * x4) >> 3;\n        x5 = (x8 - (W1 + W7) * x5) >> 3;\n        x8 = W3 * (x6 + x7) + 4;\n        x6 = (x8 - (W3 - W5) * x6) >> 3;\n        x7 = (x8 - (W3 + W5) * x7) >> 3;\n\n        /* second stage */\n        x8 = x0 + x1;\n        x0 -= x1;\n        x1 = W6 * (x3 + x2) + 4;\n        x2 = (x1 - (W2 + W6) * x2) >> 3;\n        x3 = (x1 + (W2 - W6) * x3) >> 3;\n        x1 = x4 + x6;\n        x4 -= x6;\n        x6 = x5 + x7;\n        x5 -= x7;\n\n        /* third stage */\n        x7 = x8 + x3;\n        x8 -= x3;\n        x3 = x0 + x2;\n        x0 -= x2;\n        x2 = (181 * (x4 + x5) + 128) >> 8;\n        x4 = (181 * (x4 - x5) + 128) >> 8;\n\n        /* fourth stage */\n        pred_word = *((uint32*)(pred += 16)); /* read 4 bytes from pred */\n\n        res = (x7 + x1) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x3 + x2) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x0 + x4) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x8 + x6) >> 14;\n        ADD_AND_CLIP4(res);\n        
dst_word |= (res << 24);\n        *((uint32*)(rec += lx)) = dst_word; /* save 4 bytes to dst */\n\n        pred_word = *((uint32*)(pred + 4)); /* read 4 bytes from pred */\n\n        res = (x8 - x6) >> 14;\n        ADD_AND_CLIP1(res);\n        res2 = (x0 - x4) >> 14;\n        ADD_AND_CLIP2(res2);\n        dst_word = (res2 << 8) | res;\n        res = (x3 - x2) >> 14;\n        ADD_AND_CLIP3(res);\n        dst_word |= (res << 16);\n        res = (x7 - x1) >> 14;\n        ADD_AND_CLIP4(res);\n        dst_word |= (res << 24);\n        *((uint32*)(rec + 4)) = dst_word; /* save 4 bytes to dst */\n    }\n    return;\n}\n\n/*----------------------------------------------------------------------------\n;  End Function: idctcol\n----------------------------------------------------------------------------*/\n/* ======================================================================== */\n/*  Function : BlockIDCTMotionComp                                              */\n/*  Date     : 10/16/2000                                                   */\n/*  Purpose  : fast IDCT routine                                    */\n/*  In/out   :                                                              */\n/*      Int* coeff_in   Dequantized coefficient\n        Int block_out   output IDCT coefficient\n        Int maxval      clip value                                          */\n/*  Modified :   7/31/01, add checking for all-zero and DC-only block.  */\n/*              do 8 columns at a time                                      */\n/*               8/2/01, do column first then row-IDCT.                 */\n/*               8/2/01, remove clipping (included in motion comp).     */\n/*               8/7/01, combine with motion comp.                      
*/\n/*               8/8/01, use AAN IDCT                                       */\n/*               9/4/05, use Chen's IDCT and 16 bit block                   */\n/* ======================================================================== */\nvoid BlockIDCTMotionComp(Short *block, UChar *bitmapcol, UChar bitmaprow,\n                         Int dctMode, UChar *rec, UChar *pred, Int lx_intra)\n{\n    Int i;\n    Int tmp, tmp2;\n    ULong tmp4;\n    Int bmap;\n    Short *ptr = block;\n    UChar *endcol;\n    UInt mask = 0xFF;\n    Int lx = lx_intra >> 1;\n    Int intra = (lx_intra & 1);\n\n    /*  all-zero block */\n    if (dctMode == 0 || bitmaprow == 0)\n    {\n        if (intra)\n        {\n            *((ULong*)rec) = *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            *((ULong*)(rec += lx)) = 0;\n            *((ULong*)(rec + 4)) = 0;\n            return ;\n        }\n        else /* copy from previous frame */\n        {\n            *((ULong*)rec) = *((ULong*)pred);\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 
4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            *((ULong*)(rec += lx)) = *((ULong*)(pred += 16));\n            *((ULong*)(rec + 4)) = *((ULong*)(pred + 4));\n            return ;\n        }\n    }\n\n    /* Test for DC only block */\n    if (dctMode == 1 || (bitmaprow == 0x80 && bitmapcol[0] == 0x80))\n    {\n        i = ((block[0] << 3) + 32) >> 6;\n        block[0] = 0;\n        if (intra)\n        {\n            if ((UInt)i > mask) i = mask & (~(i >> 31));\n\n            tmp = i | (i << 8);\n            tmp |= (tmp << 16);\n\n            *((ULong*)rec) = *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n            *((ULong*)(rec += lx)) = tmp;\n            *((ULong*)(rec + 4)) = tmp;\n\n            return ;\n        }\n        else\n        {\n            endcol = rec + (lx << 3);\n            do\n            {\n                tmp4 = *((ULong*)pred);\n                tmp2 = tmp4 & 0xFF;\n                tmp2 += i;\n                if ((UInt)tmp2 > mask) tmp2 = mask & (~(tmp2 >> 31));\n                tmp = (tmp4 >> 8) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                tmp2 |= (tmp << 8);\n                tmp = (tmp4 >> 16) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & 
(~(tmp >> 31));\n                tmp2 |= (tmp << 16);\n                tmp = (tmp4 >> 24) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                tmp2 |= (tmp << 24);\n                *((ULong*)rec) = tmp2;\n\n                tmp4 = *((ULong*)(pred + 4));\n                tmp2 = tmp4 & 0xFF;\n                tmp2 += i;\n                if ((UInt)tmp2 > mask) tmp2 = mask & (~(tmp2 >> 31));\n                tmp = (tmp4 >> 8) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                tmp2 |= (tmp << 8);\n                tmp = (tmp4 >> 16) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                tmp2 |= (tmp << 16);\n                tmp = (tmp4 >> 24) & 0xFF;\n                tmp += i;\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                tmp2 |= (tmp << 24);\n                *((ULong*)(rec + 4)) = tmp2;\n\n                rec += lx;\n                pred += 16;\n            }\n            while (rec < endcol);\n            return ;\n        }\n    }\n\n    for (i = 0; i < dctMode; i++)\n    {\n        bmap = (Int)bitmapcol[i];\n        if (bmap)\n        {\n            if ((bmap&0xf) == 0)\n                (*(idctcolVCA[bmap>>4]))(ptr);\n            else\n                idct_col(ptr);\n        }\n        ptr++;\n    }\n\n    if ((bitmaprow&0xf) == 0)\n    {\n        if (intra)\n            (*(idctrowVCAIntra[(Int)(bitmaprow>>4)]))(block, rec, lx);\n        else\n            (*(idctrowVCAzmv[(Int)(bitmaprow>>4)]))(block, rec, pred, lx);\n    }\n    else\n    {\n        if (intra)\n            idct_rowIntra(block, rec, lx);\n        else\n            idct_rowzmv(block, rec, pred, lx);\n    }\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/fastquant.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4enc_lib.h\"\n#include \"fastquant_inline.h\"\n\n#define siz 63\n#define LSL 18\n\n\nconst static UChar imask[8] = {128, 64, 32, 16, 8, 4, 2, 1};\n#define SIGN0(a)        ( ((a)<0) ? -1 : (((a)>0) ? 1  : 0) )\n\n/* variable bit precision quantization scale */\n/* used to avoid using 32-bit multiplication */\nconst static Short scaleArrayV[32] = {0, 16384, 8192, 5462,  /* 15 */\n                                      4096, 3277, 2731, 2341,\n                                      4096, 3641, 3277, 2979,  /* 16 */\n                                      2731, 2521, 2341, 2185,\n                                      4096, 3856, 3641, 3450,  /* 17 */\n                                      3277, 3121, 2979, 2850,\n                                      5462, 5243, 5042, 4855,  /* 18 */\n                                      4682, 4520, 4370, 4229\n                                     };\n\n/* scale for dc_scaler and qmat, note, no value smaller than 8 */\nconst static Short scaleArrayV2[47] = {0, 0, 0, 0, 0, 0, 0, 0, /* 15 */\n                                       4096, 3641, 3277, 2979, 2731, 2521, 2341, 2185,\n                                       4096, 3856, 3641, 3450, 3277, 3121, 2979, 
2850,  /* 16 */\n                                       2731, 2622, 2521, 2428, 2341, 2260, 2185, 2115,\n                                       4096, 3972, 3856, 3745, 3641, 3543, 3450, 3361,  /* 17 */\n                                       3277, 3197, 3121, 3049, 2979, 2913, 2850\n                                      };\n\n/* AAN scale and zigzag */\nconst static Short AANScale[64] =\n{\n    /* 0 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,\n    /* 1 */ 0x0B89, 0x0851, 0x08D4, 0x09CF, 0x0B89, 0x0757, 0x0AA8, 0x0A73,\n    /* 2 */ 0x0C3E, 0x08D4, 0x095F, 0x0A6A, 0x0C3E, 0x07CB, 0x0B50, 0x0B18,\n    /* 3 */ 0x0D9B, 0x09CF, 0x0A6A, 0x0B92, 0x0D9B, 0x08A8, 0x0C92, 0x0C54,\n    /* 4 */ 0x1000, 0x0B89, 0x0C3E, 0x0D9B, 0x1000, 0x0A2E, 0x0EC8, 0x0E7F,\n    /* 5 */ 0x0A2E, 0x0757, 0x07CB, 0x08A8, 0x0A2E, 0x067A, 0x0968, 0x0939,\n    /* 6 */ 0x0EC8, 0x0AA8, 0x0B50, 0x0C92, 0x0EC8, 0x0968, 0x0DA8, 0x0D64,\n    /* 7 */ 0x0E7F, 0x0A73, 0x0B18, 0x0C54, 0x0E7F, 0x0939, 0x0D64, 0x0D23\n};\n\nconst static UShort ZZTab[64] =\n{\n    /* 0 */ 0x0, 0x2, 0xA, 0xC, 0x1C, 0x1E, 0x36, 0x38,\n    /* 1 */ 0x4, 0x8, 0xE, 0x1A, 0x20, 0x34, 0x3A, 0x54,\n    /* 2 */ 0x6, 0x10, 0x18, 0x22, 0x32, 0x3C, 0x52, 0x56,\n    /* 3 */ 0x12, 0x16, 0x24, 0x30, 0x3E, 0x50, 0x58, 0x6A,\n    /* 4 */ 0x14, 0x26, 0x2E, 0x40, 0x4E, 0x5A, 0x68, 0x6C,\n    /* 5 */ 0x28, 0x2C, 0x42, 0x4C, 0x5C, 0x66, 0x6E, 0x78,\n    /* 6 */ 0x2A, 0x44, 0x4A, 0x5E, 0x64, 0x70, 0x76, 0x7A,\n    /* 7 */ 0x46, 0x48, 0x60, 0x62, 0x72, 0x74, 0x7C, 0x7E\n};\n\n\n//Tao need to remove, write another version of abs\n//#include <math.h>\n\n/* ======================================================================== */\n/*  Function : cal_dc_scalerENC                                             */\n/*  Date     : 01/25/2000                                                   */\n/*  Purpose  : calculation of DC quantization scale according to the\n               incoming Q and type;                                         
*/\n/*  In/out   :                                                              */\n/*      Int Qp      Quantizer                                               */\n/*  Return   :                                                              */\n/*          DC Scaler                                                       */\n/*  Modified :                                                              */\n/* ======================================================================== */\n/* ======================================================================== */\nInt cal_dc_scalerENC(Int QP, Int type)\n{\n\n    Int dc_scaler;\n    if (type == 1)\n    {\n        if (QP > 0 && QP < 5)\n            dc_scaler = 8;\n        else if (QP > 4 && QP < 9)\n            dc_scaler = 2 * QP;\n        else if (QP > 8 && QP < 25)\n            dc_scaler = QP + 8;\n        else\n            dc_scaler = 2 * QP - 16;\n    }\n    else\n    {\n        if (QP > 0 && QP < 5)\n            dc_scaler = 8;\n        else if (QP > 4 && QP < 25)\n            dc_scaler = (QP + 13) / 2;\n        else\n            dc_scaler = QP - 6;\n    }\n    return dc_scaler;\n}\n\n\n/***********************************************************************\n Function: BlckQuantDequantH263\n Date:     June 15, 1999\n Purpose:  Combine BlockQuantH263 and BlockDequantH263ENC\n Input:   coeff=> DCT coefficient\n Output:  qcoeff=> quantized coefficient\n          rcoeff=> reconstructed coefficient\n          return CBP for this block\n          4/2/01,  correct dc_scaler for short_header mode.\n          5/14/01,\n          changed the division into LUT multiplication/shift and other\n          modifications to speed up fastQuant/DeQuant (check for zero 1st, rowq LUT,\n          fast bitmaprow mask and borrowed Addition method instead of ifs from , ).\n          6/25/01,\n          Further optimization (~100K/QCIF), need more testing/comment before integration.\n\n          7/4/01,  break up Inter / Intra function and 
merge for different cases.\n          7/22/01,  combine AAN scaling here and reordering.\n          7/24/01, , reorder already done in FDCT, the input here is in the next block and\n            it's the\n            transpose of the raster scan. Output the same order (for proof of concenpt).\n          8/1/01, , change FDCT to do row/column FDCT without reordering, input is still\n            in the next block. The reconstructed DCT output is current block in normal\n            order. The quantized output is in zigzag scan order for INTER, row/column for\n            INTRA. Use bitmapzz for zigzag RunLevel for INTER.  The quantization is done\n            in column/row scanning order.\n          8/2/01, , change IDCT to do column/row, change bitmaprow/col to the opposite.\n          8/3/01, , add clipping to the reconstructed coefficient [-2047,2047]\n          9/4/05, , removed scaling for AAN IDCT, use Chen IDCT instead.\n ********************************************************************/\n\nInt BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int dummy, UChar shortHeader)\n{\n    Int i, zz;\n    Int tmp, coeff, q_value;\n    Int QPdiv2 = QuantParam->QPdiv2;\n    Int QPx2 = QuantParam->QPx2;\n    Int Addition = QuantParam->Addition;\n    Int QPx2plus = QuantParam->QPx2plus;\n    Int round = 1 << 15;\n    Int q_scale = scaleArrayV[QuantParam->QP];\n    Int shift = 15 + (QPx2 >> 4);\n    Int *temp;\n    UChar *bcolptr = bitmapcol;\n    Int ac_clip;    /* quantized coeff bound */\n\n    OSCL_UNUSED_ARG(comp);\n    OSCL_UNUSED_ARG(dummy);\n\n\n    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) 
*/\n    else ac_clip = 2047;  /* clip between [-2048,2047] */\n\n    /* reset all bitmap to zero */\n    temp = (Int*) bitmapcol;\n    temp[0] = temp[1] = 0;\n    bitmapzz[0] = bitmapzz[1] = 0;\n    *bitmaprow = 0;\n    QPx2plus <<= 4;\n    QPx2plus -= 8;\n\n    rcoeff += 64; /* actual data is 64 item ahead */\n    //end  = rcoeff + dctMode - 1;\n    //rcoeff--;\n    bcolptr--;\n    i = 0;\n\n    do\n    {\n        bcolptr++;\n        //rcoeff++;\n        //i=0;\n        coeff = rcoeff[i];\n        if (coeff == 0x7fff) /* all zero column */\n        {\n            i++;\n            continue;\n        }\n\n        do\n        {\n            if (coeff >= -QPx2plus && coeff < QPx2plus)  /* quantize to zero */\n            {\n                i += 8;\n                if (i < (dctMode << 3))\n                {\n                    coeff = rcoeff[i];\n                    if (coeff > -QPx2plus && coeff < QPx2plus)  /* quantize to zero */\n                    {\n                        i += 8;\n                        coeff = rcoeff[i];\n                        continue;\n                    }\n                    else\n                        goto NONZERO1;\n                }\n            }\n            else\n            {\nNONZERO1:\n                /* scaling */\n                q_value = AANScale[i];  /* load scale AAN */\n                zz = ZZTab[i];  /* zigzag order */\n\n                coeff = aan_scale(q_value, coeff, round, QPdiv2);\n                q_value = coeff_quant(coeff, q_scale, shift);\n\n                /* dequantization  */\n                if (q_value)\n                {\n\n                    //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));\n                    q_value = coeff_clip(q_value, ac_clip);\n                    qcoeff[zz>>1] = q_value;\n\n                    // dequant and clip\n                    //coeff = PV_MIN(2047,PV_MAX(-2048, q_value));\n                    tmp = 2047;\n                    coeff = coeff_dequant(q_value, QPx2, 
Addition, tmp);\n                    rcoeff[i-64] = coeff;\n\n                    (*bcolptr) |= imask[i>>3];\n                    if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));\n                    else        bitmapzz[0] |= (1 << (31 - (zz >> 1)));\n                }\n                i += 8;\n                coeff = rcoeff[i];\n            }\n        }\n        while (i < (dctMode << 3));\n\n        i += (1 - (dctMode << 3));\n    }\n    while (i < dctMode) ;\n\n    i = dctMode;\n    tmp = 1 << (8 - i);\n    while (i--)\n    {\n        if (bitmapcol[i])(*bitmaprow) |= tmp;\n        tmp <<= 1;\n    }\n\n    if (*bitmaprow)\n        return 1;\n    else\n        return 0;\n}\n\nInt BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int dc_scaler, UChar shortHeader)\n{\n    Int i;\n    Int tmp, coeff, q_value;\n    Int QPx2 = QuantParam->QPx2;\n    Int Addition = QuantParam->Addition;\n    Int QPx2plus = QuantParam->QPx2plus;\n    Int round = 1 << 15;\n    Int q_scale = scaleArrayV[QuantParam->QP];\n    Int shift = 15 + (QPx2 >> 4);\n    UChar *bmcolptr = bitmapcol;\n    Int ac_clip;    /* quantized coeff bound */\n\n    OSCL_UNUSED_ARG(bitmapzz);\n    OSCL_UNUSED_ARG(comp);\n\n\n    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) 
*/\n    else ac_clip = 2047;  /* clip between [-2048,2047] */\n\n    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n\n    QPx2plus = QPx2 << 4;\n    QPx2plus -= 8;\n\n    rcoeff += 64; /* actual data is 64 element ahead */\n    i = 0;\n\n    /* DC value */\n    coeff = *rcoeff;\n    /* scaling */\n    if (coeff == 0x7fff && !shortHeader) /* all zero column */\n    {\n        bmcolptr++;\n        i++;\n    }\n    else\n    {\n        if (coeff == 0x7fff) /* shortHeader on */\n        {\n            coeff = 1; /* can't be zero */\n            qcoeff[0] = coeff;\n            coeff = coeff * dc_scaler;\n            coeff = PV_MAX(-2048, PV_MIN(2047, coeff));\n            rcoeff[-64] = coeff;\n            bitmapcol[0] |= 128;\n            bmcolptr++;\n            //qcoeff++;\n            //rcoeff++;\n            //i=0;\n            i++;\n        }\n        else\n        {\n            q_value = round + (coeff << 12);\n            coeff = q_value >> 16;\n            if (coeff >= 0) coeff += (dc_scaler >> 1) ;\n            else            coeff -= (dc_scaler >> 1) ;\n            q_value = scaleArrayV2[dc_scaler];\n            coeff = coeff * q_value;\n            coeff >>= (15 + (dc_scaler >> 4));\n            coeff += ((UInt)coeff >> 31);\n\n            if (shortHeader)\n                coeff = PV_MAX(1, PV_MIN(254, coeff));\n\n            if (coeff)\n            {\n                qcoeff[0] = coeff;\n                coeff = coeff * dc_scaler;\n                coeff = PV_MAX(-2048, PV_MIN(2047, coeff));\n                rcoeff[-64] = coeff;\n                bitmapcol[0] |= 128;\n            }\n            i += 8;\n        }\n    }\n    /* AC values */\n    do\n    {\n        coeff = rcoeff[i];\n        if (coeff == 0x7fff) /* all zero row */\n        {\n            bmcolptr++;\n            i++;\n            continue;\n        }\n        do\n        {\n            if (coeff >= -QPx2plus && coeff < QPx2plus)  /* quantize to zero */\n            
{\n                i += 8;\n                if (i < dctMode << 3)\n                {\n                    coeff = rcoeff[i];\n                    if (coeff > -QPx2plus && coeff < QPx2plus)  /* quantize to zero */\n                    {\n                        i += 8;\n                        coeff = rcoeff[i];\n                        continue;\n                    }\n                    else\n                        goto NONZERO2;\n                }\n            }\n            else\n            {\nNONZERO2:   /* scaling */\n                q_value = AANScale[i]; /*  09/02/05 */\n\n                /* scale aan */\n                q_value = smlabb(q_value, coeff, round);\n                coeff = q_value >> 16;\n                /* quant */\n                q_value = smulbb(q_scale, coeff); /*mov     q_value, coeff, lsl #14 */\n                /*smull tmp, coeff, q_value, q_scale*/\n                q_value >>= shift;\n                q_value += ((UInt)q_value >> 31); /* add 1 if negative */\n\n                if (q_value)\n                {\n                    //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, q_value));\n                    q_value = coeff_clip(q_value, ac_clip);\n                    qcoeff[i] = q_value;\n\n                    // dequant and clip\n                    //coeff = PV_MIN(2047,PV_MAX(-2048, q_value));\n                    tmp = 2047;\n                    coeff = coeff_dequant(q_value, QPx2, Addition, tmp);\n                    rcoeff[i-64] = coeff;\n\n                    (*bmcolptr) |= imask[i>>3];\n                }\n                i += 8;\n                coeff = rcoeff[i];\n            }\n        }\n        while (i < (dctMode << 3)) ;\n\n        //qcoeff++; /* next column */\n        bmcolptr++;\n        //rcoeff++;\n        i += (1 - (dctMode << 3)); //i = 0;\n    }\n    while (i < dctMode);//while(rcoeff < end) ;\n\n    i = dctMode;\n    tmp = 1 << (8 - i);\n    while (i--)\n    {\n        if (bitmapcol[i])(*bitmaprow) |= tmp;\n        
tmp <<= 1;\n    }\n\n    if (((*bitmaprow)&127) || (bitmapcol[0]&127)) /* exclude DC */\n        return 1;\n    else\n        return 0;\n}\n\n\n/***********************************************************************\n Function: BlckQuantDequantH263DC\n Date:     5/3/2001\n Purpose:   H.263 quantization mode, only for DC component\n 6/25/01,\n          Further optimization (~100K/QCIF), need more testing/comment before integration.\n\n ********************************************************************/\nInt BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                 UChar *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader)\n{\n    Int coeff, scale_q;\n    Int CBP = 0;\n    Int QP = QuantParam->QP;\n    Int QPx2plus = QuantParam->QPx2plus;\n    Int Addition = QuantParam->Addition;\n    Int shift = 15 + (QP >> 3);\n    Int ac_clip;    /* quantized coeff bound */\n    Int tmp;\n\n    OSCL_UNUSED_ARG(dummy);\n\n    if (shortHeader) ac_clip = 126; /* clip between [-127,126] (standard allows 127!) 
*/\n    else ac_clip = 2047;  /* clip between [-2048,2047] */\n\n    *bitmaprow = 0;\n    bitmapzz[0] = bitmapzz[1] = 0;\n    coeff = rcoeff[0];\n\n    if (coeff >= -QPx2plus && coeff < QPx2plus)\n    {\n        rcoeff[0] = 0;\n        return CBP;//rcoeff[0] = 0; not needed since CBP will be zero\n    }\n    else\n    {\n        scale_q = scaleArrayV[QP];\n\n        coeff = aan_dc_scale(coeff, QP);\n\n        scale_q = coeff_quant(coeff, scale_q, shift);\n\n        //coeff = PV_MIN(ac_clip,PV_MAX(-ac_clip-1, tmp));\n        scale_q = coeff_clip(scale_q, ac_clip);\n\n        qcoeff[0] = scale_q;\n\n        QP <<= 1;\n        //coeff = PV_MIN(2047,PV_MAX(-2048, tmp));\n        tmp = 2047;\n        coeff = coeff_dequant(scale_q, QP, Addition, tmp);\n\n        rcoeff[0] = coeff;\n\n        (*bitmaprow) = 128;\n        bitmapzz[0] = (ULong)1 << 31;\n        CBP = 1;\n    }\n    return CBP;\n}\n\n\nInt BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                 UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader)\n{\n    Int tmp, coeff;\n\n    OSCL_UNUSED_ARG(QuantParam);\n\n    *bitmaprow = 0;\n    coeff = rcoeff[0];\n\n    if (coeff >= 0) coeff += (dc_scaler >> 1) ;\n    else            coeff -= (dc_scaler >> 1) ;\n    tmp = scaleArrayV2[dc_scaler];\n    tmp = coeff * tmp;\n    tmp >>= (15 + (dc_scaler >> 4));\n    tmp += ((UInt)tmp >> 31);\n\n    if (shortHeader)\n        tmp = PV_MAX(1, PV_MIN(254, tmp));\n\n    if (tmp)\n    {\n        qcoeff[0] = tmp;\n        coeff = tmp * dc_scaler;\n        coeff = PV_MAX(-2048, PV_MIN(2047, coeff));\n        rcoeff[0] = coeff;\n        *bitmaprow = 128;\n        bitmapzz[0] = (ULong)1 << 31;\n    }\n\n    return 0;\n}\n\n#ifndef NO_MPEG_QUANT\n/***********************************************************************\n Function: BlckQuantDequantMPEG\n Date:     June 15, 1999\n Purpose:  Combine BlockQuantMPEG and BlockDequantMPEGENC\n Input:   coeff=> 
DCT coefficient\n Output:  qcoeff=> quantized coefficient\n          rcoeff=> reconstructed coefficient\n Modified:  7/5/01, break up function for Intra/Inter\n          8/3/01,  update with changes from H263 quant mode.\n          8/3/01,  add clipping to the reconstructed coefficient [-2048,2047]\n          8/6/01,  optimize using multiplicative lookup-table.\n                     can be further optimized using ARM assembly, e.g.,\n                     clipping, 16-bit mult., etc !!!!!!!!!!!!!\n ********************************************************************/\n\nInt BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int dc_scaler)\n{\n    Int i, zz;\n    Int tmp, coeff, q_value = 0;\n    Int sum = 0;\n    Int stepsize, QPx2 = QP << 1;\n    Int CBP = 0;\n    Int round = 1 << 15;\n    Int q_scale = scaleArrayV[QP];\n    Int shift = 15 + (QP >> 3);\n    UChar *bcolptr = bitmapcol;\n\n    OSCL_UNUSED_ARG(dc_scaler);\n    OSCL_UNUSED_ARG(comp);\n\n\n    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;\n    bitmapzz[0] = bitmapzz[1] = 0;\n    *bitmaprow = 0;\n\n    rcoeff += 64;\n    i = 0;\n    bcolptr--;\n\n    do\n    {\n        bcolptr++;\n        coeff = rcoeff[i];\n        if (coeff == 0x7fff) /* all zero column */\n        {\n            i++;\n            continue;\n        }\n        do\n        {\n            q_value = AANScale[i];  /*  09/02/05 scaling for AAN*/\n            /* aan scaling */\n            q_value = smlabb(q_value, coeff, round);\n\n            coeff = q_value >> 16;\n\n            stepsize = qmat[i];\n//          if(coeff>0)     coeff = (16*coeff + (stepsize/2)) / stepsize;\n//          else            coeff = (16*coeff - (stepsize/2)) / stepsize;\n            coeff <<= 4;\n            if (coeff >= 0) coeff += (stepsize >> 1) ;\n            else            coeff -= 
(stepsize >> 1) ;\n            q_value = scaleArrayV2[stepsize];\n            /* mpeg quant table scale */\n            coeff = smulbb(coeff, q_value);\n\n            coeff >>= (15 + (stepsize >> 4));\n            coeff += ((UInt)coeff >> 31);\n\n            /* QP scale */\n            if (coeff >= -QPx2 && coeff < QPx2)  /* quantized to zero*/\n            {\n                i += 8;\n            }\n            else\n            {\n//              q_value = coeff/(QPx2);\n                q_value = coeff_quant(coeff, q_scale, shift);\n\n                if (q_value)                /* dequant */\n                {\n\n                    zz = ZZTab[i];  /* zigzag order */\n\n                    tmp = 2047;\n\n                    q_value = clip_2047(q_value, tmp);\n\n                    qcoeff[zz>>1] = q_value;\n\n                    //q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16;\n                    /* no need for SIGN0, no zero coming in this {} */\n                    q_value = coeff_dequant_mpeg(q_value, stepsize, QP, tmp);\n\n                    rcoeff[i-64] = q_value;\n\n                    sum += q_value;\n                    (*bcolptr) |= imask[i>>3];\n                    if ((zz >> 1) > 31) bitmapzz[1] |= (1 << (63 - (zz >> 1)));\n                    else        bitmapzz[0] |= (1 << (31 - (zz >> 1)));\n                }\n                i += 8;\n            }\n            coeff = rcoeff[i];\n        }\n        while (i < (dctMode << 3)) ;\n\n        i += (1 - (dctMode << 3));\n    }\n    while (i < dctMode) ;\n\n    i = dctMode;\n    tmp = 1 << (8 - i);\n    while (i--)\n    {\n        if (bitmapcol[i])(*bitmaprow) |= tmp;\n        tmp <<= 1;\n    }\n\n    if (*bitmaprow)\n        CBP = 1;   /* check CBP before mismatch control,  7/5/01 */\n\n    /* Mismatch control,  5/3/01 */\n    if (CBP)\n    {\n        if ((sum&0x1) == 0)\n        {\n            rcoeff--;  /* rcoeff[63] */\n            coeff = *rcoeff;\n            coeff ^= 0x1;\n            *rcoeff 
= coeff;\n            if (coeff)\n            {\n                bitmapcol[7] |= 1;\n                (*bitmaprow) |= 1;\n            }\n        }\n    }\n\n    return CBP;\n}\n\nInt BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                               UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                               Int dctMode, Int comp, Int dc_scaler)\n{\n    Int i;\n    Int tmp, coeff, q_value = 0;\n    Int sum = 0;\n    Int stepsize;\n    Int CBP = 0;\n    Int round = 1 << 15;\n    Int q_scale = scaleArrayV[QP];\n    Int shift = 15 + (QP >> 3);\n    Int round2 = (3 * QP + 2) >> 2;\n    Int QPx2plus = (QP << 1) - round2;\n    UChar *bmcolptr = bitmapcol;\n\n    OSCL_UNUSED_ARG(bitmapzz);\n    OSCL_UNUSED_ARG(comp);\n\n    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n\n    rcoeff += 64;\n    i = 0;\n\n    /* DC value */\n    coeff = *rcoeff;\n\n    if (coeff == 0x7fff) /* all zero column */\n    {\n        bmcolptr++;\n        i++;\n    }\n    else\n    {\n        q_value = round + (coeff << 12);\n        coeff = q_value >> 16;\n        /*if (coeff >= 0)   coeff = (coeff + (dc_scaler/2)) / dc_scaler;\n        else            coeff = (coeff - (dc_scaler/2)) / dc_scaler;*/\n        if (coeff >= 0) coeff += (dc_scaler >> 1) ;\n        else            coeff -= (dc_scaler >> 1) ;\n        q_value = scaleArrayV2[dc_scaler];\n\n        /* mpeg quant table scale */\n        coeff = smulbb(coeff, q_value);\n\n        coeff >>= (15 + (dc_scaler >> 4));\n        coeff += ((UInt)coeff >> 31);\n\n        if (coeff)\n        {\n            coeff = PV_MAX(1, PV_MIN(254, coeff));\n            qcoeff[0] = coeff;\n\n            coeff = smulbb(coeff, dc_scaler);\n\n            q_value = clip_2047(coeff, 2047);\n\n            sum = q_value;\n\n            rcoeff[-64] = q_value;\n\n            bitmapcol[0] |= 128;\n        }\n        i += 8;\n    }\n    /* AC values */\n    do\n    {\n        coeff 
= rcoeff[i];\n        if (coeff == 0x7fff) /* all zero row */\n        {\n            bmcolptr++;\n            i++;\n            continue;\n        }\n        do\n        {\n            /* scaling */\n            q_value = AANScale[i]; /*  09/02/05 */\n\n            /* q_value = coeff*q_value + round */\n            q_value = smlabb(coeff, q_value, round);\n            coeff = q_value >> 16;\n\n            stepsize = qmat[i];\n            /*if(coeff>0)       coeff = (16*coeff + (stepsize/2)) / stepsize;\n            else            coeff = (16*coeff - (stepsize/2)) / stepsize;*/\n            coeff <<= 4;\n            if (coeff >= 0) coeff += (stepsize >> 1) ;\n            else            coeff -= (stepsize >> 1) ;\n            q_value = scaleArrayV2[stepsize];\n\n            /* scale mpeg quant */\n            coeff = smulbb(coeff, q_value);\n\n            coeff >>= (15 + (stepsize >> 4));\n            coeff += ((UInt)coeff >> 31);\n\n            if (coeff >= -QPx2plus && coeff < QPx2plus)\n            {\n                i += 8;\n            }\n            else\n            {\n                //q_value = ( coeff + SIGN0(coeff)*((3*QP+2)/4))/(2*QP);\n                if (coeff > 0) coeff += round2;\n                else if (coeff < 0) coeff -= round2;\n\n                q_value = smulbb(coeff, q_scale);\n                q_value >>= shift;\n                q_value += ((UInt)q_value >> 31);\n\n                if (q_value)\n                {\n                    tmp = 2047;\n                    q_value = clip_2047(q_value, tmp);\n\n                    qcoeff[i] = q_value;\n\n                    stepsize = smulbb(stepsize, QP);\n                    q_value =  smulbb(q_value, stepsize);\n\n                    q_value = coeff_dequant_mpeg_intra(q_value, tmp);\n                    //q_value = (coeff*stepsize*QP*2)/16;\n\n                    rcoeff[i-64] = q_value;\n\n                    sum += q_value;\n                    (*bmcolptr) |= imask[i>>3];\n                }\n    
            i += 8;\n            }\n            coeff = rcoeff[i];\n        }\n        while (i < (dctMode << 3)) ;\n\n        bmcolptr++;\n        i += (1 - (dctMode << 3));\n    }\n    while (i < dctMode) ;\n\n    i = dctMode;\n    tmp = 1 << (8 - i);\n    while (i--)\n    {\n        if (bitmapcol[i])(*bitmaprow) |= tmp;\n        tmp <<= 1;\n    }\n\n    if (((*bitmaprow) &127) || (bitmapcol[0]&127))\n        CBP = 1;  /* check CBP before mismatch control,  7/5/01 */\n\n    /* Mismatch control,  5/3/01 */\n    if (CBP || bitmapcol[0])\n    {\n        if ((sum&0x1) == 0)\n        {\n            rcoeff--;  /* rcoeff[63] */\n            coeff = *rcoeff;\n            coeff ^= 0x1;\n            *rcoeff = coeff;\n            if (coeff)\n            {\n                bitmapcol[7] |= 1;\n                (*bitmaprow) |= 1;\n            }\n        }\n    }\n\n    return CBP;\n}\n\n\n/***********************************************************************\n Function: BlckQuantDequantMPEGDC\n Date:     5/3/2001\n Purpose:  MPEG Quant/Dequant for DC only block.\n ********************************************************************/\nInt BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy)\n{\n    Int q_value, coeff, stepsize;\n    Int CBP = 0;\n    Int q_scale = scaleArrayV[QP];\n    Int shift = 15 + (QP >> 3);\n    Int QPx2 = QP << 1;\n\n    OSCL_UNUSED_ARG(dummy);\n\n    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n    bitmapzz[0] = bitmapzz[1] = 0;\n    coeff = rcoeff[0];\n    stepsize = qmat[0];\n\n    /*if(coeff>0)       coeff = (16*coeff + (stepsize/2)) / stepsize;\n    else            coeff = (16*coeff - (stepsize/2)) / stepsize;*/\n    coeff <<= 4;\n    if (coeff >= 0) coeff += (stepsize >> 1) ;\n    else            coeff -= (stepsize >> 1) ;\n    q_value = scaleArrayV2[stepsize];\n\n    coeff = smulbb(coeff, 
q_value);\n\n    coeff >>= (15 + (stepsize >> 4));\n    coeff += ((UInt)coeff >> 31);\n\n    if (coeff >= -QPx2 && coeff < QPx2)\n    {\n        rcoeff[0] = 0;\n        return CBP;\n    }\n    else\n    {\n//      q_value = coeff/(QPx2);\n        q_value = coeff_quant(coeff, q_scale, shift);\n\n        if (q_value)\n        {\n\n            //PV_MIN(2047,PV_MAX(-2048, q_value));\n            q_value = clip_2047(q_value, 2047);\n            qcoeff[0] = q_value;\n            q_value = coeff_dequant_mpeg(q_value, stepsize, QP, 2047);\n            //q_value=(((coeff*2)+SIGN0(coeff))*stepsize*QP)/16;\n            rcoeff[0] = q_value;\n\n            bitmapcol[0] = 128;\n            (*bitmaprow) = 128;\n            bitmapzz[0] = (UInt)1 << 31;\n            CBP = 1;\n\n            /* Mismatch control,  5/3/01 */\n            if ((q_value&0x1) == 0)\n            {\n                rcoeff[63] = 1; /* after scaling it remains the same */\n                bitmapcol[7] |= 1;\n                (*bitmaprow) |= 1;\n            }\n        }\n    }\n    return CBP;\n}\n\n\nInt BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                                 UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                                 Int dc_scaler)\n{\n    Int tmp, coeff, q_value;\n\n    OSCL_UNUSED_ARG(QP);\n    OSCL_UNUSED_ARG(qmat);\n\n\n    *((Int*)bitmapcol) = *((Int*)(bitmapcol + 4)) = 0;\n    *bitmaprow = 0;\n    coeff = rcoeff[0];\n\n    /*if (coeff >= 0)   tmp = (coeff + dc_scaler/2) / dc_scaler;\n    else            tmp = (coeff - dc_scaler/2) / dc_scaler;*/\n    if (coeff >= 0) coeff += (dc_scaler >> 1) ;\n    else            coeff -= (dc_scaler >> 1) ;\n    tmp = scaleArrayV2[dc_scaler];\n\n    tmp = smulbb(tmp, coeff);\n    tmp >>= (15 + (dc_scaler >> 4));\n    tmp += ((UInt)tmp >> 31);\n\n    if (tmp)\n    {\n        coeff = PV_MAX(1, PV_MIN(254, tmp));\n        qcoeff[0] = coeff;\n\n        q_value = smulbb(coeff, dc_scaler);\n   
     q_value = clip_2047(q_value, 2047);\n        rcoeff[0] = q_value;\n        bitmapcol[0] = 128;\n        *bitmaprow = 128;\n        bitmapzz[0] = (UInt)1 << 31;\n\n        /* Mismatch control,  5/3/01 */\n        if ((q_value&0x1) == 0)\n        {\n            rcoeff[63] = 1; /* after scaling it remains the same */\n            bitmapcol[7] |= 1;\n            (*bitmaprow) |= 1;\n        }\n    }\n\n    return 0;\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/fastquant_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/*  Filename: fastquant_inline.h                                                        */\n/*  Description: Implementation for in-line functions used in dct.cpp           */\n/*  Modified:                                                                   */\n/*********************************************************************************/\n#ifndef _FASTQUANT_INLINE_H_\n#define _FASTQUANT_INLINE_H_\n\n#include \"mp4def.h\"\n#include \"oscl_base_macros.h\"\n\n#if !defined(PV_ARM_GCC_V5) /* ARM GNU COMPILER  */\n\n__inline int32 aan_scale(int32 q_value, int32 coeff, int32 round, int32 QPdiv2)\n{\n    q_value = coeff * q_value + round;\n    coeff = q_value >> 16;\n    if (coeff < 0)  coeff += QPdiv2;\n    else            coeff -= QPdiv2;\n\n    return coeff;\n}\n\n\n__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)\n{\n    int32 q_value;\n\n    q_value = coeff * q_scale;      //q_value = -((-(coeff + QPdiv2)*q_scale)>>LSL);\n    q_value >>= shift;                  //q_value = (((coeff - QPdiv2)*q_scale)>>LSL );\n    q_value += ((UInt)q_value >> 31); /* add one if negative */\n\n    return 
q_value;\n}\n\n__inline int32  coeff_clip(int32 q_value, int32 ac_clip)\n{\n    int32 coeff = q_value + ac_clip;\n\n    if ((UInt)coeff > (UInt)(ac_clip << 1))\n        q_value = ac_clip ^(q_value >> 31);\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)\n{\n    int32 coeff;\n\n    OSCL_UNUSED_ARG(tmp);\n\n    if (q_value < 0)\n    {\n        coeff = q_value * QPx2 - Addition;\n        if (coeff < -2048)\n            coeff = -2048;\n    }\n    else\n    {\n        coeff = q_value * QPx2 + Addition;\n        if (coeff > 2047)\n            coeff = 2047;\n    }\n    return coeff;\n}\n\n__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)\n{\n    q_value = coeff * q_value + round;\n\n    return q_value;\n}\n\n__inline int32 smulbb(int32 q_scale, int32 coeff)\n{\n    int32 q_value;\n\n    q_value = coeff * q_scale;\n\n    return q_value;\n}\n\n__inline int32 aan_dc_scale(int32 coeff, int32 QP)\n{\n\n    if (coeff < 0)  coeff += (QP >> 1);\n    else            coeff -= (QP >> 1);\n\n    return coeff;\n}\n\n__inline int32 clip_2047(int32 q_value, int32 tmp)\n{\n    OSCL_UNUSED_ARG(tmp);\n\n    if (q_value < -2048)\n    {\n        q_value = -2048;\n    }\n    else if (q_value > 2047)\n    {\n        q_value = 2047;\n    }\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)\n{\n    int32 coeff;\n\n    OSCL_UNUSED_ARG(tmp);\n\n    coeff = q_value << 1;\n    stepsize *= QP;\n    if (coeff > 0)\n    {\n        q_value = (coeff + 1) * stepsize;\n        q_value >>= 4;\n        if (q_value > 2047) q_value = 2047;\n    }\n    else\n    {\n        q_value = (coeff - 1) * stepsize;\n        q_value += 15;\n        q_value >>= 4;\n        if (q_value < -2048)    q_value = -2048;\n    }\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)\n{\n    OSCL_UNUSED_ARG(tmp);\n\n    q_value <<= 1;\n    if (q_value > 
0)\n    {\n        q_value >>= 4;\n        if (q_value > 2047) q_value = 2047;\n    }\n    else\n    {\n        q_value += 15;\n        q_value >>= 4;\n        if (q_value < -2048) q_value = -2048;\n    }\n\n    return q_value;\n}\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n#if defined(__TARGET_ARCH_5TE)\n\n__inline int32 aan_scale(int32 q_value, int32 coeff,\n                         int32 round, int32 QPdiv2)\n{\n    __asm\n    {\n        smlabb q_value, coeff, q_value, round\n        movs       coeff, q_value, asr #16\n        addle   coeff, coeff, QPdiv2\n        subgt   coeff, coeff, QPdiv2\n    }\n\n    return coeff;\n}\n\n__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)\n{\n    int32 q_value;\n\n    __asm\n    {\n        smulbb  q_value, q_scale, coeff    /*mov    coeff, coeff, lsl #14*/\n        mov     coeff, q_value, asr shift   /*smull tmp, coeff, q_scale, coeff*/\n        add q_value, coeff, coeff, lsr #31\n    }\n\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)\n{\n    int32 coeff;\n\n    __asm\n    {\n        cmp     q_value, #0\n        smulbb  coeff, q_value, QPx2\n        sublt   coeff, coeff, Addition\n        addge   coeff, coeff, Addition\n        add     q_value, coeff, tmp\n        subs    q_value, q_value, #3840\n        subcss  q_value, q_value, #254\n        eorhi   coeff, tmp, coeff, asr #31\n    }\n\n    return coeff;\n}\n\n__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)\n{\n    __asm\n    {\n        smlabb q_value, coeff, q_value, round\n    }\n\n    return q_value;\n}\n\n__inline int32 smulbb(int32 q_scale, int32 coeff)\n{\n    int32 q_value;\n\n    __asm\n    {\n        smulbb  q_value, q_scale, coeff\n    }\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, int32 QP, int32 tmp)\n{\n    /* tmp must have value of 2047 */\n    int32 coeff;\n    __asm\n    {\n        movs    coeff, 
q_value, lsl #1\n        smulbb  stepsize, stepsize, QP\n        addgt   coeff, coeff, #1\n        sublt   coeff, coeff, #1\n        smulbb  q_value, coeff, stepsize\n        addlt   q_value, q_value, #15\n        mov     q_value, q_value, asr #4\n        add     coeff, q_value, tmp\n        subs    coeff, coeff, #0xf00\n        subcss  coeff, coeff, #0xfe\n        eorhi   q_value, tmp, q_value, asr #31\n    }\n\n    return q_value;\n}\n\n\n#else // not ARMV5TE\n\n__inline int32 aan_scale(int32 q_value, int32 coeff,\n                         int32 round, int32 QPdiv2)\n{\n    __asm\n    {\n        mla q_value, coeff, q_value, round\n        movs       coeff, q_value, asr #16\n        addle   coeff, coeff, QPdiv2\n        subgt   coeff, coeff, QPdiv2\n    }\n\n    return coeff;\n}\n\n__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)\n{\n    int32 q_value;\n\n    __asm\n    {\n        mul q_value, q_scale, coeff    /*mov    coeff, coeff, lsl #14*/\n        mov     coeff, q_value, asr shift   /*smull tmp, coeff, q_scale, coeff*/\n        add q_value, coeff, coeff, lsr #31\n    }\n\n\n    return q_value;\n}\n\n\n__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)\n{\n    int32 coeff;\n\n    __asm\n    {\n        cmp     q_value, #0\n        mul coeff, q_value, QPx2\n        sublt   coeff, coeff, Addition\n        addge   coeff, coeff, Addition\n        add     q_value, coeff, tmp\n        subs    q_value, q_value, #3840\n        subcss  q_value, q_value, #254\n        eorhi   coeff, tmp, coeff, asr #31\n    }\n\n    return coeff;\n}\n\n__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)\n{\n    __asm\n    {\n        mla q_value, coeff, q_value, round\n    }\n\n    return q_value;\n}\n\n__inline int32 smulbb(int32 q_scale, int32 coeff)\n{\n    int32 q_value;\n\n    __asm\n    {\n        mul q_value, q_scale, coeff\n    }\n\n    return q_value;\n}\n\n\n__inline int32 coeff_dequant_mpeg(int32 q_value, int32 
stepsize, int32 QP, int32 tmp)\n{\n    /* tmp must have value of 2047 */\n    int32 coeff;\n    __asm\n    {\n        movs    coeff, q_value, lsl #1\n        mul  stepsize, stepsize, QP\n        addgt   coeff, coeff, #1\n        sublt   coeff, coeff, #1\n        mul q_value, coeff, stepsize\n        addlt   q_value, q_value, #15\n        mov     q_value, q_value, asr #4\n        add     coeff, q_value, tmp\n        subs    coeff, coeff, #0xf00\n        subcss  coeff, coeff, #0xfe\n        eorhi   q_value, tmp, q_value, asr #31\n    }\n\n    return q_value;\n}\n\n\n#endif\n\n__inline int32  coeff_clip(int32 q_value, int32 ac_clip)\n{\n    int32 coeff;\n\n    __asm\n    {\n        add     coeff, q_value, ac_clip\n        subs    coeff, coeff, ac_clip, lsl #1\n        eorhi   q_value, ac_clip, q_value, asr #31\n    }\n\n    return q_value;\n}\n\n__inline int32 aan_dc_scale(int32 coeff, int32 QP)\n{\n\n    __asm\n    {\n        cmp   coeff, #0\n        addle   coeff, coeff, QP, asr #1\n        subgt   coeff, coeff, QP, asr #1\n    }\n\n    return coeff;\n}\n\n__inline int32 clip_2047(int32 q_value, int32 tmp)\n{\n    /* tmp must have value of 2047 */\n    int32 coeff;\n\n    __asm\n    {\n        add     coeff, q_value, tmp\n        subs    coeff, coeff, #0xf00\n        subcss  coeff, coeff, #0xfe\n        eorhi   q_value, tmp, q_value, asr #31\n    }\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)\n{\n    int32 coeff;\n\n    __asm\n    {\n        movs    q_value, q_value, lsl #1\n        addlt   q_value, q_value, #15\n        mov     q_value, q_value, asr #4\n        add     coeff, q_value, tmp\n        subs    coeff, coeff, #0xf00\n        subcss  coeff, coeff, #0xfe\n        eorhi   q_value, tmp, q_value, asr #31\n    }\n\n    return q_value;\n}\n\n#elif (defined(PV_ARM_GCC_V5)) /* ARM GNU COMPILER  */\n\n__inline int32 aan_scale(int32 q_value, int32 coeff,\n                         int32 round, int32 QPdiv2)\n{\n    
register int32 out;\n    register int32 qv = q_value;\n    register int32 cf = coeff;\n    register int32 rr = round;\n    register int32 qp = QPdiv2;\n\n    asm volatile(\"smlabb %0, %2, %1, %3\\n\\t\"\n                 \"movs %0, %0, asr #16\\n\\t\"\n                 \"addle %0, %0, %4\\n\\t\"\n                 \"subgt %0, %0, %4\"\n             : \"=&r\"(out)\n                         : \"r\"(qv),\n                         \"r\"(cf),\n                         \"r\"(rr),\n                         \"r\"(qp));\n    return out;\n}\n\n__inline int32 coeff_quant(int32 coeff, int32 q_scale, int32 shift)\n{\n    register int32 out;\n    register int32 temp1;\n    register int32 cc = coeff;\n    register int32 qs = q_scale;\n    register int32 ss = shift;\n\n    asm volatile(\"smulbb %0, %3, %2\\n\\t\"\n                 \"mov %1, %0, asr %4\\n\\t\"\n                 \"add %0, %1, %1, lsr #31\"\n             : \"=&r\"(out),\n                 \"=&r\"(temp1)\n                         : \"r\"(cc),\n                         \"r\"(qs),\n                         \"r\"(ss));\n\n    return out;\n}\n\n__inline int32 coeff_clip(int32 q_value, int32 ac_clip)\n{\n    register int32 coeff;\n\n    asm volatile(\"add   %1, %0, %2\\n\\t\"\n                 \"subs  %1, %1, %2, lsl #1\\n\\t\"\n                 \"eorhi %0, %2, %0, asr #31\"\n             : \"+r\"(q_value),\n                 \"=&r\"(coeff)\n                         : \"r\"(ac_clip));\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant(int32 q_value, int32 QPx2, int32 Addition, int32 tmp)\n{\n    register int32 out;\n    register int32 temp1;\n    register int32 qv = q_value;\n    register int32 qp = QPx2;\n    register int32 aa = Addition;\n    register int32 tt = tmp;\n\n    asm volatile(\"cmp    %2, #0\\n\\t\"\n                 \"mul    %0, %2, %3\\n\\t\"\n                 \"sublt  %0, %0, %4\\n\\t\"\n                 \"addge  %0, %0, %4\\n\\t\"\n                 \"add    %1, %0, %5\\n\\t\"\n                 \"subs   
%1, %1, #3840\\n\\t\"\n                 \"subcss %1, %1, #254\\n\\t\"\n                 \"eorhi  %0, %5, %0, asr #31\"\n             : \"=&r\"(out),\n                 \"=&r\"(temp1)\n                         : \"r\"(qv),\n                         \"r\"(qp),\n                         \"r\"(aa),\n                         \"r\"(tt));\n\n    return out;\n}\n\n__inline int32 smlabb(int32 q_value, int32 coeff, int32 round)\n{\n    register int32 out;\n    register int32 aa = (int32)q_value;\n    register int32 bb = (int32)coeff;\n    register int32 cc = (int32)round;\n\n    asm volatile(\"smlabb %0, %1, %2, %3\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb),\n                         \"r\"(cc));\n    return out;\n}\n\n__inline int32 smulbb(int32 q_scale, int32 coeff)\n{\n    register int32 out;\n    register int32 aa = (int32)q_scale;\n    register int32 bb = (int32)coeff;\n\n    asm volatile(\"smulbb %0, %1, %2\"\n             : \"=&r\"(out)\n                         : \"r\"(aa),\n                         \"r\"(bb));\n    return out;\n}\n\n__inline int32 aan_dc_scale(int32 coeff, int32 QP)\n{\n    register int32 out;\n    register int32 cc = coeff;\n    register int32 qp = QP;\n\n    asm volatile(\"cmp %1, #0\\n\\t\"\n                 \"addle %0, %1, %2, asr #1\\n\\t\"\n                 \"subgt %0, %1, %2, asr #1\"\n             : \"=&r\"(out)\n                         : \"r\"(cc),\n                         \"r\"(qp));\n    return out;\n}\n\n__inline int32 clip_2047(int32 q_value, int32 tmp)\n{\n    register int32 coeff;\n    asm volatile(\"add    %1, %0, %2\\n\\t\"\n                 \"subs   %1, %1, #0xF00\\n\\t\"\n                 \"subcss %1, %1, #0xFE\\n\\t\"\n                 \"eorhi  %0, %2, %0, asr #31\"\n             : \"+r\"(q_value),\n                 \"=&r\"(coeff)\n                         : \"r\"(tmp));\n\n    return q_value;\n}\n\n__inline int32 coeff_dequant_mpeg(int32 q_value, int32 stepsize, 
int32 QP, int32 tmp)\n{\n    register int32 out;\n    register int32 temp1;\n    register int32 qv = q_value;\n    register int32 ss = stepsize;\n    register int32 qp = QP;\n    register int32 tt = tmp;\n\n    asm volatile(\"movs    %1, %2, lsl #1\\n\\t\"\n                 \"mul     %0, %3, %4\\n\\t\"\n                 \"addgt   %1, %1, #1\\n\\t\"\n                 \"sublt   %1, %1, #1\\n\\t\"\n                 \"mul     %0, %1, %0\\n\\t\"\n                 \"addlt   %0, %0, #15\\n\\t\"\n                 \"mov     %0, %0, asr #4\\n\\t\"\n                 \"add     %1, %0, %5\\n\\t\"\n                 \"subs    %1, %1, #0xF00\\n\\t\"\n                 \"subcss  %1, %1, #0xFE\\n\\t\"\n                 \"eorhi   %0, %5, %0, asr #31\"\n             : \"=&r\"(out),\n                 \"=&r\"(temp1)\n                         : \"r\"(qv),\n                         \"r\"(ss),\n                         \"r\"(qp),\n                         \"r\"(tt));\n\n    return out;\n\n}\n\n__inline int32 coeff_dequant_mpeg_intra(int32 q_value, int32 tmp)\n{\n    register int32 out;\n    register int32 temp1;\n    register int32 qv = q_value;\n    register int32 tt = tmp;\n\n    asm volatile(\"movs    %1, %2, lsl #1\\n\\t\"\n                 \"addlt   %1, %1, #15\\n\\t\"\n                 \"mov     %0, %1, asr #4\\n\\t\"\n                 \"add     %1, %0, %3\\n\\t\"\n                 \"subs    %1, %1, #0xF00\\n\\t\"\n                 \"subcss  %1, %1, #0xFE\\n\\t\"\n                 \"eorhi   %0, %3, %0, asr #31\"\n             : \"=&r\"(out),\n                 \"=&r\"(temp1)\n                         : \"r\"(qv),\n                         \"r\"(tt));\n    return out;\n}\n\n\n#endif // Platform\n\n\n#endif //_FASTQUANT_INLINE_H_\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/findhalfpel.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"m4venc_oscl.h\"\n\n/* 3/29/01 fast half-pel search based on neighboring guess */\n/* value ranging from 0 to 4, high complexity (more accurate) to\n   low complexity (less accurate) */\n#define HP_DISTANCE_TH      2  /* half-pel distance threshold */\n\n#define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    void GenerateSearchRegion(UChar *searchPadding, UChar *ref, Int width, Int height,\n    Int ilow, Int ihigh, Int jlow, Int jhigh);\n\n    void InterpDiag(UChar *prev, Int lx, UChar *pred_block);\n    void InterpHorz(UChar *prev, Int lx, UChar *pred_block);\n    void InterpVert(UChar *prev, Int lx, UChar *pred_block);\n#ifdef __cplusplus\n}\n#endif\n\n\nconst static Int distance_tab[9][9] =   /* [hp_guess][k] */\n{\n    {0, 1, 1, 1, 1, 1, 1, 1, 1},\n    {1, 0, 1, 2, 3, 4, 3, 2, 1},\n    {1, 0, 0, 0, 1, 2, 3, 2, 1},\n    {1, 2, 1, 0, 1, 2, 3, 4, 3},\n    {1, 2, 1, 0, 0, 0, 1, 2, 3},\n    {1, 4, 3, 2, 1, 0, 1, 2, 3},\n    {1, 2, 3, 2, 1, 0, 0, 0, 1},\n    {1, 2, 3, 4, 3, 2, 1, 0, 1},\n    {1, 0, 1, 2, 3, 2, 1, 0, 
0}\n};\n\n\n/*=====================================================================\n    Function:   FindHalfPelMB\n    Date:       10/7/2000\n    Purpose:    Find half pel resolution MV surrounding the full-pel MV\n=====================================================================*/\n\nvoid FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,\n                   Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess)\n{\n//  hp_mem = ULong *vertArray; /* 20x17 */\n//           ULong *horzArray; /* 20x16 */\n//           ULong *diagArray; /* 20x17 */\n    Int dmin, d;\n\n    Int xh, yh;\n    Int k, kmin = 0;\n    Int imin, jmin, ilow, jlow;\n    Int h263_mode = video->encParams->H263_Enabled; /*  3/29/01 */\n    Int in_range[9] = {0, 1, 1, 1, 1, 1, 1, 1, 1}; /*  3/29/01 */\n    Int range = video->encParams->SearchRange;\n    Int lx = video->currVop->pitch;\n    Int width = video->currVop->width; /*  padding */\n    Int height = video->vol[video->currLayer]->height;\n    Int(**SAD_MB_HalfPel)(UChar*, UChar*, Int, void*) =\n        video->functionPointer->SAD_MB_HalfPel;\n    void *extra_info = video->sad_extra_info;\n\n    Int next_hp_pos[9][2] = {{0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}, {0, -1}};\n    Int next_ncand[9] = {0, 1 , lx, lx, 0, -1, -1, -lx, -lx};\n\n    cur = video->currYMB;\n\n    /**************** check range ***************************/\n    /*  3/29/01 */\n    imin = xpos + (mot[0].x >> 1);\n    jmin = ypos + (mot[0].y >> 1);\n    ilow = xpos - range;\n    jlow = ypos - range;\n\n    if (!h263_mode)\n    {\n        if (imin <= -15 || imin == ilow)\n            in_range[1] = in_range[7] = in_range[8] = 0;\n        else if (imin >= width - 1)\n            in_range[3] = in_range[4] = in_range[5] = 0;\n        if (jmin <= -15 || jmin == jlow)\n            in_range[1] = in_range[2] = in_range[3] = 0;\n        else if (jmin >= height - 1)\n            in_range[5] = in_range[6] = in_range[7] = 0;\n    
}\n    else\n    {\n        if (imin <= 0 || imin == ilow)\n            in_range[1] = in_range[7] = in_range[8] = 0;\n        else if (imin >= width - 16)\n            in_range[3] = in_range[4] = in_range[5] = 0;\n        if (jmin <= 0 || jmin == jlow)\n            in_range[1] = in_range[2] = in_range[3] = 0;\n        else if (jmin >= height - 16)\n            in_range[5] = in_range[6] = in_range[7] = 0;\n    }\n\n    xhmin[0] = 0;\n    yhmin[0] = 0;\n    dmin = mot[0].sad;\n\n    xh = 0;\n    yh = -1;\n    ncand -= lx; /* initial position */\n\n    for (k = 2; k <= 8; k += 2)\n    {\n        if (distance_tab[hp_guess][k] < HP_DISTANCE_TH)\n        {\n            if (in_range[k])\n            {\n                d = (*(SAD_MB_HalfPel[((yh&1)<<1)+(xh&1)]))(ncand, cur, (dmin << 16) | lx, extra_info);\n\n                if (d < dmin)\n                {\n                    dmin = d;\n                    xhmin[0] = xh;\n                    yhmin[0] = yh;\n                    kmin = k;\n                }\n                else if (d == dmin &&\n                         PV_ABS(mot[0].x + xh) + PV_ABS(mot[0].y + yh) < PV_ABS(mot[0].x + xhmin[0]) + PV_ABS(mot[0].y + yhmin[0]))\n                {\n                    xhmin[0] = xh;\n                    yhmin[0] = yh;\n                    kmin = k;\n                }\n\n            }\n        }\n        xh += next_hp_pos[k][0];\n        yh += next_hp_pos[k][1];\n        ncand += next_ncand[k];\n\n        if (k == 8)\n        {\n            if (xhmin[0] != 0 || yhmin[0] != 0)\n            {\n                k = -1;\n                hp_guess = kmin;\n            }\n        }\n    }\n\n    mot[0].sad = dmin;\n    mot[0].x += xhmin[0];\n    mot[0].y += yhmin[0];\n\n    return ;\n}\n\n#ifndef NO_INTER4V\n/*=====================================================================\n    Function:   FindHalfPelBlk\n    Date:       10/7/2000\n    Purpose:    Find half pel resolution MV surrounding the full-pel MV\n                And decide 
between 1MV or 4MV mode\n=====================================================================*/\n///// THIS FUNCTION IS NOT WORKING!!! NEED TO BE RIVISITED\n\nInt FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[],\n                   UChar *mode, Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem)\n{\n    Int k, comp;\n    Int xh, yh;//, xhmin, yhmin;\n    Int imin, jmin, ilow, jlow;\n    Int height;\n    UChar *cand, *cur8;\n    UChar *hmem;//[17*17]; /* half-pel memory */\n    Int d, dmin, sad8;\n    Int lx = video->currVop->pitch;\n    Int width = video->currVop->width; /* , padding */\n    Int(*SAD_Blk_HalfPel)(UChar*, UChar*, Int, Int, Int, Int, Int, void*) = video->functionPointer->SAD_Blk_HalfPel;\n    void *extra_info = video->sad_extra_info;\n    Int in_range[8]; /*  3/29/01 */\n    Int range = video->encParams->SearchRange;\n    Int swidth;\n    Int next_hp_pos[8][2] = {{1, 0}, {1, 0}, {0, 1}, {0, 1}, { -1, 0}, { -1, 0}, {0, -1}, {0, -1}};\n\n    height = video->vol[video->currLayer]->height;\n\n    hmem = hp_mem;\n    sad8 = 0;\n    for (comp = 0; comp < 4; comp++)\n    {\n#ifdef _SAD_STAT\n        num_HP_Blk++;\n#endif\n        /**************** check range ***************************/\n        /*  3/29/01 */\n        M4VENC_MEMSET(in_range, 1, sizeof(Int) << 3);\n        imin = xpos + ((comp & 1) << 3) + (mot[comp+1].x >> 1);\n        jmin = ypos + ((comp & 2) << 2) + (mot[comp+1].y >> 1);\n        ilow = xpos + ((comp & 1) << 3) - range;\n        jlow = ypos + ((comp & 2) << 2) - range;\n\n        if (imin <= -15 || imin == ilow)\n            in_range[0] = in_range[6] = in_range[7] = 0;\n        else if (imin >= width - 1)\n            in_range[2] = in_range[3] = in_range[4] = 0;\n\n        if (jmin <= -15 || jmin == jlow)\n            in_range[0] = in_range[1] = in_range[2] = 0;\n        else if (jmin >= height - 1)\n            in_range[4] = in_range[5] = in_range[6] = 0;\n\n        /**************** 
half-pel search ***********************/\n        cur8 = cur + ((comp & 1) << 3) + ((comp & 2) << 2) * width ;\n\n        /* generate half-pel search region */\n        {\n            cand = ncand8[comp+1];\n            swidth = lx;\n        }\n\n        xhmin[comp+1] = 0;\n        yhmin[comp+1] = 0;\n        dmin = mot[comp+1].sad;\n\n        xh = -1;\n        yh = -1;\n        for (k = 0; k < 8; k++)\n        {\n            if (in_range[k])\n            {\n                d = (*SAD_Blk_HalfPel)(cand, cur8, dmin, lx, swidth, xh, yh, extra_info);\n\n                if (d < dmin)\n                {\n                    dmin = d;\n                    xhmin[comp+1] = xh;\n                    yhmin[comp+1] = yh;\n                }\n            }\n            xh += next_hp_pos[k][0];\n            yh += next_hp_pos[k][1];\n        }\n        /********************************************/\n        mot[comp+1].x += xhmin[comp+1];\n        mot[comp+1].y += yhmin[comp+1];\n        mot[comp+1].sad = dmin;\n        sad8 += dmin;\n\n        if (sad8 >= sad16 - PREF_16_VEC)\n        {\n            *mode = MODE_INTER;\n            for (k = 1; k <= 4; k++)\n            {\n                mot[k].sad = (mot[0].sad + 2) >> 2;\n                mot[k].x = mot[0].x;\n                mot[k].y = mot[0].y;\n            }\n            return sad8;\n        }\n\n        hmem += (10 * 10);\n    }\n\n    *mode = MODE_INTER4V;\n\n    return sad8;\n}\n#endif /* NO_INTER4V */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/m4venc_oscl.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/* Description: Created for abstracting out OSCL such that the code can be used */\n/*          by both V3 and V4 OSCL library. This file is for V4.                */\n/*********************************************************************************/\n\n#ifndef _M4VENC_OSCL_H_\n#define _M4VENC_OSCL_H_\n\n\n#define OSCL_DISABLE_WARNING_CONV_POSSIBLE_LOSS_OF_DATA\n#include \"osclconfig_compiler_warnings.h\"\n\n#include \"oscl_mem.h\"\n\n#define M4VENC_MALLOC(size)   oscl_malloc(size)\n#define M4VENC_FREE(ptr)                oscl_free(ptr)\n\n#define M4VENC_MEMSET(ptr,val,size)     oscl_memset(ptr,val,size)\n#define M4VENC_MEMCPY(dst,src,size)     oscl_memcpy(dst,src,size)\n\n#include \"oscl_math.h\"\n#define M4VENC_LOG(x)                   oscl_log(x)\n#define M4VENC_SQRT(x)                  oscl_sqrt(x)\n#define M4VENC_POW(x,y)                 oscl_pow(x,y)\n\n#define M4VENC_HAS_SYMBIAN_SUPPORT  OSCL_HAS_SYMBIAN_SUPPORT\n\n#endif //_M4VENC_OSCL_H_\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/me_utils.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"m4venc_oscl.h\"\n\n#define VOP_OFFSET  ((lx<<4)+16)  /* for offset to image area */\n#define CVOP_OFFSET ((lx<<2)+8)\n\n#define PREF_INTRA  512     /* bias for INTRA coding */\n\n/*===============================================================\n    Function:   ChooseMode\n    Date:       09/21/2000\n    Purpose:    Choosing between INTRA or INTER\n    Input/Output: Pointer to the starting point of the macroblock.\n    Note:\n===============================================================*/\nvoid ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD)\n{\n    Int i, j;\n    Int MB_mean, A, tmp, Th;\n    Int offset = (lx >> 2) - 4;\n    UChar *p = cur;\n    Int *pint = (Int *) cur, temp = 0;\n    MB_mean = 0;\n    A = 0;\n    Th = (min_SAD - PREF_INTRA) >> 1;\n\n    for (j = 0; j < 8; j++)\n    {\n\n        /* Odd Rows */\n        temp += (*pint++) & 0x00FF00FF;\n        temp += (*pint++) & 0x00FF00FF;\n        temp += (*pint++) & 0x00FF00FF;\n        temp += (*pint++) & 0x00FF00FF;\n        pint += offset;\n\n        /* Even Rows */\n        temp += (*pint++ >> 8) & 0x00FF00FF;\n        temp += (*pint++ >> 
8) & 0x00FF00FF;\n        temp += (*pint++ >> 8) & 0x00FF00FF;\n        temp += (*pint++ >> 8) & 0x00FF00FF;\n        pint += offset;\n\n    }\n\n    MB_mean = (((temp & 0x0000FFFF)) + ((temp & 0xFFFF0000) >> 16)) >> 7;\n\n    p = cur;\n    offset = lx - 16;\n    for (j = 0; j < 16; j++)\n    {\n        temp = (j & 1);\n        p += temp;\n        i = 8;\n        while (i--)\n        {\n            tmp = *p - MB_mean;\n            p += 2;\n            if (tmp > 0) A += tmp;\n            else    A -= tmp;\n        }\n\n        if (A >= Th)\n        {\n            *Mode = MODE_INTER;\n            return ;\n        }\n        p += (offset - temp);\n    }\n\n    if (A < Th)\n        *Mode = MODE_INTRA;\n    else\n        *Mode = MODE_INTER;\n\n    return ;\n}\n\n\n/*===============================================================\n    Function:   GetHalfPelMBRegion\n    Date:       09/17/2000\n    Purpose:    Interpolate the search region for half-pel search\n    Input/Output:   Center of the search, Half-pel memory, width\n    Note:       rounding type should be parameterized.\n                Now fixed it to zero!!!!!!\n\n===============================================================*/\n\n\nvoid GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx)\n{\n    Int i, j;\n    UChar *p1, *p2, *p3, *p4;\n    UChar *hmem1 = hmem;\n    UChar *hmem2 = hmem1 + 33;\n    Int offset = lx - 17;\n\n    p1 = cand - lx - 1;\n    p2 = cand - lx;\n    p3 = cand - 1;\n    p4 = cand;\n\n    for (j = 0; j < 16; j++)\n    {\n        for (i = 0; i < 16; i++)\n        {\n            *hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2;\n            *hmem1++ = ((*p2++) + *p4 + 1) >> 1;\n            *hmem2++ = ((*p3++) + *p4 + 1) >> 1;\n            *hmem2++ = *p4++;\n        }\n        /*  last pixel */\n        *hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2;\n        *hmem2++ = ((*p3++) + (*p4++) + 1) >> 1;\n        hmem1 += 33;\n        hmem2 += 33;\n        p1 += offset;\n        p2 += 
offset;\n        p3 += offset;\n        p4 += offset;\n    }\n    /* last row */\n    for (i = 0; i < 16; i++)\n    {\n        *hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2;\n        *hmem1++ = ((*p2++) + (*p4++) + 1) >> 1;\n\n    }\n    *hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2;\n\n    return ;\n}\n\n/*===============================================================\n   Function:    GetHalfPelBlkRegion\n   Date:        09/20/2000\n   Purpose: Interpolate the search region for half-pel search\n            in 4MV mode.\n   Input/Output:    Center of the search, Half-pel memory, width\n   Note:        rounding type should be parameterized.\n            Now fixed it to zero!!!!!!\n\n===============================================================*/\n\n\nvoid GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx)\n{\n    Int i, j;\n    UChar *p1, *p2, *p3, *p4;\n    UChar *hmem1 = hmem;\n    UChar *hmem2 = hmem1 + 17;\n    Int offset = lx - 9;\n\n    p1 = cand - lx - 1;\n    p2 = cand - lx;\n    p3 = cand - 1;\n    p4 = cand;\n\n    for (j = 0; j < 8; j++)\n    {\n        for (i = 0; i < 8; i++)\n        {\n            *hmem1++ = ((*p1++) + *p2 + *p3 + *p4 + 2) >> 2;\n            *hmem1++ = ((*p2++) + *p4 + 1) >> 1;\n            *hmem2++ = ((*p3++) + *p4 + 1) >> 1;\n            *hmem2++ = *p4++;\n        }\n        /*  last pixel */\n        *hmem1++ = ((*p1++) + (*p2++) + *p3 + *p4 + 2) >> 2;\n        *hmem2++ = ((*p3++) + (*p4++) + 1) >> 1;\n        hmem1 += 17;\n        hmem2 += 17;\n        p1 += offset;\n        p2 += offset;\n        p3 += offset;\n        p4 += offset;\n    }\n    /* last row */\n    for (i = 0; i < 8; i++)\n    {\n        *hmem1++ = ((*p1++) + *p2 + (*p3++) + *p4 + 2) >> 2;\n        *hmem1++ = ((*p2++) + (*p4++) + 1) >> 1;\n\n    }\n    *hmem1 = (*p1 + *p2 + *p3 + *p4 + 2) >> 2;\n\n    return ;\n}\n\n\n/*=====================================================================\n    Function:   PaddingEdge\n    Date:       09/16/2000\n    
Purpose:    Pad edge of a Vop\n    Modification: 09/20/05.\n=====================================================================*/\n\nvoid  PaddingEdge(Vop *refVop)\n{\n    UChar *src, *dst;\n    Int i;\n    Int pitch, width, height;\n    ULong temp1, temp2;\n\n    width = refVop->width;\n    height = refVop->height;\n    pitch = refVop->pitch;\n\n    /* pad top */\n    src = refVop->yChan;\n\n    temp1 = *src; /* top-left corner */\n    temp2 = src[width-1]; /* top-right corner */\n    temp1 |= (temp1 << 8);\n    temp1 |= (temp1 << 16);\n    temp2 |= (temp2 << 8);\n    temp2 |= (temp2 << 16);\n\n    dst = src - (pitch << 4);\n\n    *((ULong*)(dst - 16)) = temp1;\n    *((ULong*)(dst - 12)) = temp1;\n    *((ULong*)(dst - 8)) = temp1;\n    *((ULong*)(dst - 4)) = temp1;\n\n    M4VENC_MEMCPY(dst, src, width);\n\n    *((ULong*)(dst += width)) = temp2;\n    *((ULong*)(dst + 4)) = temp2;\n    *((ULong*)(dst + 8)) = temp2;\n    *((ULong*)(dst + 12)) = temp2;\n\n    dst = dst - width - 16;\n\n    i = 15;\n    while (i--)\n    {\n        M4VENC_MEMCPY(dst + pitch, dst, pitch);\n        dst += pitch;\n    }\n\n    /* pad sides */\n    dst += (pitch + 16);\n    src = dst;\n    i = height;\n    while (i--)\n    {\n        temp1 = *src;\n        temp2 = src[width-1];\n        temp1 |= (temp1 << 8);\n        temp1 |= (temp1 << 16);\n        temp2 |= (temp2 << 8);\n        temp2 |= (temp2 << 16);\n\n        *((ULong*)(dst - 16)) = temp1;\n        *((ULong*)(dst - 12)) = temp1;\n        *((ULong*)(dst - 8)) = temp1;\n        *((ULong*)(dst - 4)) = temp1;\n\n        *((ULong*)(dst += width)) = temp2;\n        *((ULong*)(dst + 4)) = temp2;\n        *((ULong*)(dst + 8)) = temp2;\n        *((ULong*)(dst + 12)) = temp2;\n\n        src += pitch;\n        dst = src;\n    }\n\n    /* pad bottom */\n    dst -= 16;\n    i = 16;\n    while (i--)\n    {\n        M4VENC_MEMCPY(dst, dst - pitch, pitch);\n        dst += pitch;\n    }\n\n\n    return 
;\n}\n\n/*===================================================================\n    Function:   ComputeMBSum\n    Date:       10/28/2000\n    Purpose:    Compute sum of absolute value (SAV) of blocks in a macroblock\n                in INTRA mode needed for rate control. Thus, instead of\n                computing the SAV, we can compute first order moment or\n                variance .\n\n    11/28/00:    add MMX\n    9/3/01:      do parallel comp for C function.\n===================================================================*/\nvoid ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb)\n{\n    Int j;\n    Int *cInt, *cInt2;\n    Int sad1 = 0, sad2 = 0, sad3 = 0, sad4 = 0;\n    Int tmp, tmp2, mask = 0x00FF00FF;\n\n    cInt = (Int*)cur;   /* make sure this is word-align */\n    cInt2 = (Int*)(cur + (lx << 3));\n    j = 8;\n    while (j--)\n    {\n        tmp = cInt[3];  /* load 4 pixels at a time */\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad2 += tmp;\n        tmp = cInt[2];\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad2 += tmp;\n        tmp = cInt[1];\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad1 += tmp;\n        tmp = *cInt;\n        cInt += (lx >> 2);\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad1 += tmp;\n\n        tmp = cInt2[3];\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad4 += tmp;\n        tmp = cInt2[2];\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad4 += tmp;\n        tmp = cInt2[1];\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad3 += tmp;\n        tmp = *cInt2;\n        cInt2 += (lx >> 2);\n        tmp2 = tmp & mask;\n        tmp = (tmp >> 8) & mask;\n        tmp += tmp2;\n        sad3 += tmp;\n    }\n    
sad1 += (sad1 << 16);\n    sad2 += (sad2 << 16);\n    sad3 += (sad3 << 16);\n    sad4 += (sad4 << 16);\n    sad1 >>= 16;\n    sad2 >>= 16;\n    sad3 >>= 16;\n    sad4 >>= 16;\n\n    mot_mb[1].sad = sad1;\n    mot_mb[2].sad = sad2;\n    mot_mb[3].sad = sad3;\n    mot_mb[4].sad = sad4;\n    mot_mb[0].sad = sad1 + sad2 + sad3 + sad4;\n\n    return ;\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/motion_comp.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_base_macros.h\"  // for OSCL_UNUSED_ARG \n#include \"mp4lib_int.h\"\n#include \"mp4enc_lib.h\"\n\n//const static Int roundtab4[] = {0,1,1,1};\n//const static Int roundtab8[] = {0,0,1,1,1,1,1,2};\n//const static Int roundtab12[] = {0,0,0,1,1,1,1,1,1,1,2,2};\nconst static Int roundtab16[] = {0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2};\n\n#define FORWARD_MODE    1\n#define BACKWARD_MODE   2\n#define BIDIRECTION_MODE    3\n#define DIRECT_MODE         4\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /*Function Prototype */\n    /* no-edge padding */\n    Int EncGetPredOutside(Int xpos, Int ypos, UChar *c_prev, UChar *rec,\n    Int width, Int height, Int rnd1);\n\n    void Copy_MB_from_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int width);\n    void Copy_B_from_Vop(UChar *comp, Int cChan[], Int width);\n    void Copy_MB_into_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int width);\n    void Copy_B_into_Vop(UChar *comp, Int cChan[], Int width);\n    void get_MB(UChar *c_prev, UChar *c_prev_u  , UChar *c_prev_v,\n                Short mb[6][64], Int lx, Int lx_uv);\n\n    Int GetPredAdvBy0x0(\n        UChar *c_prev,      /* i */\n        UChar *pred_block,      /* i */\n        
Int lx,     /* i */\n        Int rnd1 /* i */\n    );\n\n    Int GetPredAdvBy0x1(\n        UChar *c_prev,      /* i */\n        UChar *pred_block,      /* i */\n        Int lx,     /* i */\n        Int rnd1 /* i */\n    );\n\n    Int GetPredAdvBy1x0(\n        UChar *c_prev,      /* i */\n        UChar *pred_block,      /* i */\n        Int lx,     /* i */\n        Int rnd1 /* i */\n    );\n\n    Int GetPredAdvBy1x1(\n        UChar *c_prev,      /* i */\n        UChar *pred_block,      /* i */\n        Int lx,     /* i */\n        Int rnd1 /* i */\n    );\n\n    static Int(*const GetPredAdvBTable[2][2])(UChar*, UChar*, Int, Int) =\n    {\n        {&GetPredAdvBy0x0, &GetPredAdvBy0x1},\n        {&GetPredAdvBy1x0, &GetPredAdvBy1x1}\n    };\n\n\n#ifdef __cplusplus\n}\n#endif\n\n\n/* ======================================================================== */\n/*  Function : getMotionCompensatedMB( )                                    */\n/*  Date     : 4/17/2001                                                    */\n/*  Purpose  : Get the motion compensate block into video->predictionMB     */\n/*              and generate video->predictionErrorMB                       */\n/*              modified from MBMotionComp() function in the decoder        */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nvoid getMotionCompensatedMB(VideoEncData *video, Int ind_x, Int ind_y, Int offset)\n{\n    Vop *prevVop = video->forwardRefVop; //reference frame\n    Vop *currVop = video->currVop;\n    Int mbnum = video->mbnum;       //mb index\n    MOT *mot = video->mot[mbnum];\n    Int ypos, xpos;\n    UChar *c_prev, *cu_prev, *cv_prev;\n    UChar *c_rec, *cu_rec, *cv_rec;\n    Int height, pitch, pitch_uv, height_uv;\n   
 Int mode = video->headerInfo.Mode[mbnum];  /* get mode */\n    Int dx, dy;\n    Int xpred, ypred;\n    Int xsum, ysum;\n    Int round1;\n\n    OSCL_UNUSED_ARG(offset);\n\n    round1 = (Int)(1 - video->currVop->roundingType);\n\n    pitch  = currVop->pitch;\n    height = currVop->height;\n    pitch_uv  = pitch >> 1;\n    height_uv = height >> 1;\n\n    ypos = ind_y << 4 ;\n    xpos = ind_x << 4 ;\n\n    c_rec = video->predictedMB;\n    cu_rec = video->predictedMB + 256;\n    cv_rec = video->predictedMB + 264;\n\n    if (mode == MODE_INTER || mode == MODE_INTER_Q)\n    {\n        /* Motion vector in x direction       */\n        dx = mot[0].x;\n        dy = mot[0].y;\n\n        c_prev  = prevVop->yChan;\n\n        xpred = (xpos << 1) + dx ;\n        ypred = (ypos << 1) + dy ;\n\n        /* Call function that performs luminance prediction */\n        EncPrediction_INTER(xpred, ypred, c_prev, c_rec,\n                            pitch, round1);\n\n        if ((dx & 3) == 0)  dx = dx >> 1;\n        else        dx = (dx >> 1) | 1;\n\n        if ((dy & 3) == 0)      dy = dy >> 1;\n        else        dy = (dy >> 1) | 1;\n\n        xpred = xpos + dx;\n        ypred = ypos + dy;\n\n        cu_prev = prevVop->uChan;\n        cv_prev = prevVop->vChan;\n\n        EncPrediction_Chrom(xpred, ypred, cu_prev, cv_prev, cu_rec, cv_rec,\n                            pitch_uv, (currVop->width) >> 1, height_uv, round1);\n    }\n#ifndef NO_INTER4V\n    else if (mode == MODE_INTER4V)\n    {\n        c_prev  = prevVop->yChan;\n        cu_prev = prevVop->uChan;\n        cv_prev = prevVop->vChan;\n\n        EncPrediction_INTER4V(xpos, ypos, mot, c_prev, c_rec,\n                              pitch, round1);\n\n        xsum = mot[1].x + mot[2].x + mot[3].x + mot[4].x;\n        ysum = mot[1].y + mot[2].y + mot[3].y + mot[4].y;\n\n        dx = PV_SIGN(xsum) * (roundtab16[(PV_ABS(xsum)) & 0xF] +\n                              (((PV_ABS(xsum)) >> 4) << 1));\n        dy = PV_SIGN(ysum) * 
(roundtab16[(PV_ABS(ysum)) & 0xF] +\n                              (((PV_ABS(ysum)) >> 4) << 1));\n\n        ypred = ypos + dy;\n        xpred = xpos + dx;\n\n        EncPrediction_Chrom(xpred, ypred, cu_prev, cv_prev, cu_rec, cv_rec,\n                            pitch_uv, (currVop->width) >> 1, height_uv, round1);\n    }\n#endif\n    else\n    {\n        ;//printf(\"Error, MODE_SKIPPED is not decided yet!\\n\");\n    }\n\n    return ;\n}\n\n/***************************************************************************\n    Function:   EncPrediction_INTER\n    Date:       04/17/2001\n    Purpose:    Get predicted area for luminance and compensate with the residue.\n                Modified from luminance_pred_mode_inter() in decoder.\n***************************************************************************/\n\nvoid EncPrediction_INTER(\n    Int xpred,          /* i */\n    Int ypred,          /* i */\n    UChar *c_prev,          /* i */\n    UChar *c_rec,       /* i */\n    Int lx,         /* i */\n    Int round1          /* i */\n)\n{\n    c_prev += (xpred >> 1) + ((ypred >> 1) * lx);\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);\n\n    c_prev += B_SIZE;\n    c_rec += B_SIZE;\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);\n\n    c_prev += (lx << 3) - B_SIZE;\n    c_rec += (16 << 3) - B_SIZE; /* padding */\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);\n\n    c_prev += B_SIZE;\n    c_rec += B_SIZE;\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev, c_rec, lx, round1);\n\n    return;\n}\n\n#ifndef NO_INTER4V\n/***************************************************************************\n    Function:   EncPrediction_INTER4V\n    Date:       04/17/2001\n    Purpose:    Get predicted area for luminance and compensate with the residue.\n                Modified from luminance_pred_mode_inter4v() in decoder.\n***************************************************************************/\n\nvoid 
EncPrediction_INTER4V(\n    Int xpos,           /* i */\n    Int ypos,           /* i */\n    MOT *mot,           /* i */\n    UChar *c_prev,          /* i */\n    UChar *c_rec,           /* i */\n    Int lx,         /* i */\n    Int round1          /* i */\n)\n{\n    Int ypred, xpred;\n\n    xpred = (Int)((xpos << 1) + mot[1].x);\n    ypred = (Int)((ypos << 1) + mot[1].y);\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                       c_rec, lx, round1);\n\n    c_rec += B_SIZE;\n\n    xpred = (Int)(((xpos + B_SIZE) << 1) + mot[2].x);\n    ypred = (Int)((ypos << 1) + mot[2].y);\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                       c_rec, lx, round1);\n\n    c_rec += (16 << 3) - B_SIZE; /* padding */\n\n    xpred = (Int)((xpos << 1) + mot[3].x);\n    ypred = (Int)(((ypos + B_SIZE) << 1) + mot[3].y);\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                       c_rec, lx, round1);\n\n    c_rec += B_SIZE;\n\n    xpred = (Int)(((xpos + B_SIZE) << 1) + mot[4].x);\n    ypred = (Int)(((ypos + B_SIZE) << 1) + mot[4].y);\n\n    GetPredAdvBTable[ypred&1][xpred&1](c_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                       c_rec, lx, round1);\n\n    return;\n}\n#endif /* NO_INTER4V */\n\n/***************************************************************************\n    Function:   EncPrediction_Chrom\n    Date:       04/17/2001\n    Purpose:    Get predicted area for chrominance and compensate with the residue.\n                Modified from chrominance_pred() in decoder.\n***************************************************************************/\n\nvoid EncPrediction_Chrom(\n    Int xpred,          /* i */\n    Int ypred,          /* i */\n    UChar *cu_prev,         /* i */\n    UChar *cv_prev,         /* i */\n    UChar *cu_rec,\n    UChar *cv_rec,\n    Int 
lx,\n    Int width_uv,           /* i */\n    Int height_uv,          /* i */\n    Int round1          /* i */\n)\n{\n    /* check whether the MV points outside the frame */\n    /* Compute prediction for Chrominance b block (block[4]) */\n    if (xpred >= 0 && xpred <= ((width_uv << 1) - (2*B_SIZE)) && ypred >= 0 &&\n            ypred <= ((height_uv << 1) - (2*B_SIZE)))\n    {\n        /*****************************/\n        /* (x,y) is inside the frame */\n        /*****************************/\n\n        /* Compute prediction for Chrominance b (block[4]) */\n        GetPredAdvBTable[ypred&1][xpred&1](cu_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                           cu_rec, lx, round1);\n\n        /* Compute prediction for Chrominance r (block[5]) */\n        GetPredAdvBTable[ypred&1][xpred&1](cv_prev + (xpred >> 1) + ((ypred >> 1)*lx),\n                                           cv_rec,  lx, round1);\n    }\n    else\n    {\n        /******************************/\n        /* (x,y) is outside the frame */\n        /******************************/\n\n        /* Compute prediction for Chrominance b (block[4]) */\n        EncGetPredOutside(xpred, ypred,\n                          cu_prev, cu_rec,\n                          width_uv, height_uv, round1);\n\n        /* Compute prediction for Chrominance r (block[5]) */\n        EncGetPredOutside(xpred, ypred,\n                          cv_prev, cv_rec,\n                          width_uv, height_uv, round1);\n    }\n\n    return;\n}\n/***************************************************************************\n    Function:   GetPredAdvancedB\n    Date:       04/17/2001\n    Purpose:    Get predicted area (block) and compensate with the residue.\n                - modified from GetPredAdvancedBAdd in decoder.\n    Intput/Output:\n    Modified:\n***************************************************************************/\n\nInt GetPredAdvBy0x0(\n    UChar *prev,        /* i */\n    UChar *rec,  
   /* i */\n    Int lx,     /* i */\n    Int rnd /* i */\n)\n{\n    Int i;      /* loop variable */\n    ULong  pred_word, word1, word2;\n    Int tmp;\n\n    OSCL_UNUSED_ARG(rnd);\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n\n    tmp = (ULong)prev & 0x3;\n\n    if (tmp == 0)  /* word-aligned */\n    {\n        rec -= 16; /* preset */\n        prev -= lx;\n\n        for (i = 8; i > 0; i--)\n        {\n            *((ULong*)(rec += 16)) = *((ULong*)(prev += lx));\n            *((ULong*)(rec + 4)) = *((ULong*)(prev + 4));\n        }\n        return 1;\n    }\n    else if (tmp == 1) /* first position */\n    {\n        prev--; /* word-aligned */\n        rec -= 16; /* preset */\n        prev -= lx;\n\n        for (i = 8; i > 0; i--)\n        {\n            word1 = *((ULong*)(prev += lx)); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((ULong*)(prev + 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 8; /* 0 b4 b3 b2 */\n            pred_word = word1 | (word2 << 24);  /* b5 b4 b3 b2 */\n            *((ULong*)(rec += 16)) = pred_word;\n\n            word1 = *((ULong*)(prev + 8)); /* b12 b11 b10 b9 */\n            word2 >>= 8; /* 0 b8 b7 b6 */\n            pred_word = word2 | (word1 << 24); /* b9 b8 b7 b6 */\n            *((ULong*)(rec + 4)) = pred_word;\n        }\n\n        return 1;\n    }\n    else if (tmp == 2) /* second position */\n    {\n        prev -= 2; /* word1-aligned */\n        rec -= 16; /* preset */\n        prev -= lx;\n\n        for (i = 8; i > 0; i--)\n        {\n            word1 = *((ULong*)(prev += lx)); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((ULong*)(prev + 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 16; /* 0 0 b4 b3 */\n            pred_word = word1 | (word2 << 16);  /* b6 b5 b4 b3 */\n            *((ULong*)(rec += 16)) = pred_word;\n\n            word1 = *((ULong*)(prev + 8)); /* b12 b11 b10 b9 */\n            word2 >>= 16; /* 0 0 
b8 b7 */\n            pred_word = word2 | (word1 << 16); /* b10 b9 b8 b7 */\n            *((ULong*)(rec + 4)) = pred_word;\n        }\n\n        return 1;\n    }\n    else /* third position */\n    {\n        prev -= 3; /* word1-aligned */\n        rec -= 16; /* preset */\n        prev -= lx;\n\n        for (i = 8; i > 0; i--)\n        {\n            word1 = *((ULong*)(prev += lx)); /* read 4 bytes, b4 b3 b2 b1 */\n            word2 = *((ULong*)(prev + 4));  /* read 4 bytes, b8 b7 b6 b5 */\n            word1 >>= 24; /* 0 0 0 b4 */\n            pred_word = word1 | (word2 << 8);   /* b7 b6 b5 b4 */\n            *((ULong*)(rec += 16)) = pred_word;\n\n            word1 = *((ULong*)(prev + 8)); /* b12 b11 b10 b9 */\n            word2 >>= 24; /* 0 0 0 b8 */\n            pred_word = word2 | (word1 << 8); /* b11 b10 b9 b8 */\n            *((ULong*)(rec + 4)) = pred_word;\n\n        }\n\n        return 1;\n    }\n}\n/**************************************************************************/\nInt GetPredAdvBy0x1(\n    UChar *prev,        /* i */\n    UChar *rec,     /* i */\n    Int lx,     /* i */\n    Int rnd1 /* i */\n)\n{\n    Int i;      /* loop variable */\n    Int offset;\n    ULong word1, word2, word3, word12;\n    Int tmp;\n    ULong mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = lx - B_SIZE; /* offset for prev */\n\n    /* Branch based on pixel location (half-pel or full-pel) for x and y */\n    rec -= 12; /* preset */\n\n    tmp = (ULong)prev & 3;\n    mask = 254;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0xFEFEFEFE */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b4 b3 b2 b1 */\n                word2 = *((ULong*)(prev += 4)); /* b8 b7 b6 b5 */\n                word12 = (word1 >> 8); /* 0 b4 b3 b2 */\n                word12 |= (word2 
<< 24); /* b5 b4 b3 b2 */\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b12 b11 b10 b9 */\n                word12 = (word2 >> 8); /* 0 b8 b7 b6 */\n                word12 |= (word1 << 24); /* b9 b8 b7 b6 */\n                word3 = word2 | word12;\n                word2 &= mask;\n                word3 &= (~mask);  /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 == 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b4 b3 b2 b1 */\n\n                word2 = *((ULong*)(prev += 4)); /* b8 b7 b6 b5 */\n                word12 = (word1 >> 8); /* 0 b4 b3 b2 */\n                word12 |= (word2 << 24); /* b5 b4 b3 b2 */\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b12 b11 b10 b9 */\n                word12 = (word2 >> 8); /* 0 b8 b7 b6 */\n                word12 |= (word1 << 24); /* b9 b8 b7 b6 */\n                
word3 = word2 & word12;\n                word2 &= mask;\n                word3 &= (~mask);  /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        } /* rnd1 */\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b3 b2 b1 b0 */\n                word2 = *((ULong*)(prev += 4)); /* b7 b6 b5 b4 */\n                word12 = (word1 >> 8); /* 0 b3 b2 b1 */\n                word1 >>= 16; /* 0 0 b3 b2 */\n                word12 |= (word2 << 24); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 16); /* b5 b4 b3 b2 */\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b11 b10 b9 b8 */\n                word12 = (word2 >> 8); /* 0 b7 b6 b5 */\n                word2 >>= 16; /* 0 0 b7 b6 */\n                word12 |= (word1 << 24); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 16); /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word2&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec 
+= 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b3 b2 b1 b0 */\n\n                word2 = *((ULong*)(prev += 4)); /* b7 b6 b5 b4 */\n                word12 = (word1 >> 8); /* 0 b3 b2 b1 */\n                word1 >>= 16; /* 0 0 b3 b2 */\n                word12 |= (word2 << 24); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 16); /* b5 b4 b3 b2 */\n                word3 = word1 & word12;\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b11 b10 b9 b8 */\n                word12 = (word2 >> 8); /* 0 b7 b6 b5 */\n                word2 >>= 16; /* 0 0 b7 b6 */\n                word12 |= (word1 << 24); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 16); /* b9 b8 b7 b6 */\n                word3 = word2 & word12;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n\n                prev += offset;\n            }\n            return 1;\n        } /* rnd1 */\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b2 b1 b0 bN1 */\n                word2 = *((ULong*)(prev += 4)); /* b6 b5 b4 b3 */\n                word12 = (word1 >> 
16); /* 0 0 b2 b1 */\n                word1 >>= 24; /* 0 0 0 b2 */\n                word12 |= (word2 << 16); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 8); /* b5 b4 b3 b2 */\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b10 b9 b8 b7 */\n                word12 = (word2 >> 16); /* 0 0 b6 b5 */\n                word2 >>= 24; /* 0 0 0 b6 */\n                word12 |= (word1 << 16); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 8); /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 == 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b2 b1 b0 bN1 */\n                word2 = *((ULong*)(prev += 4)); /* b6 b5 b4 b3 */\n                word12 = (word1 >> 16); /* 0 0 b2 b1 */\n                word1 >>= 24; /* 0 0 0 b2 */\n                word12 |= (word2 << 16); /* b4 b3 b2 b1 */\n                word1 |= (word2 << 8); /* b5 b4 b3 b2 */\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n               
 word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b10 b9 b8 b7 */\n                word12 = (word2 >> 16); /* 0 0 b6 b5 */\n                word2 >>= 24; /* 0 0 0 b6 */\n                word12 |= (word1 << 16); /* b8 b7 b6 b5 */\n                word2 |= (word1 << 8); /* b9 b8 b7 b6 */\n                word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else /* tmp = 3 */\n    {\n        prev -= 3; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b1 b0 bN1 bN2 */\n                word2 = *((ULong*)(prev += 4)); /* b5 b4 b3 b2 */\n                word12 = (word1 >> 24); /* 0 0 0 b1 */\n                word12 |= (word2 << 8); /* b4 b3 b2 b1 */\n                word1 = word2;\n                word3 = word1 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b9 b8 b7 b6 */\n                word12 = (word2 >> 24); /* 0 0 0 b5 */\n                word12 |= (word1 << 8); /* b8 b7 b6 b5 */\n    
            word2 = word1; /* b9 b8 b7 b6 */\n                word3 = word2 | word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n        else\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)prev); /* b1 b0 bN1 bN2 */\n                word2 = *((ULong*)(prev += 4)); /* b5 b4 b3 b2 */\n                word12 = (word1 >> 24); /* 0 0 0 b1 */\n                word12 |= (word2 << 8); /* b4 b3 b2 b1 */\n                word1 = word2;\n                word3 = word1 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word12 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1; /* write 4 pixels */\n\n                word1 = *((ULong*)(prev += 4)); /* b9 b8 b7 b6 */\n                word12 = (word2 >> 24); /* 0 0 0 b5 */\n                word12 |= (word1 << 8); /* b8 b7 b6 b5 */\n                word2 = word1; /* b9 b8 b7 b6 */\n                word3 = word2 & word12; // rnd1 = 1; otherwise word3 = word1&word12\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 &= mask;\n                word2 >>= 1;\n                word2 = word2 + (word12 >> 1);\n                word2 += word3;\n                *((ULong*)(rec += 4)) = word2; /* write 4 pixels */\n                prev += offset;\n            }\n            return 1;\n        }\n    
}\n}\n\n/**************************************************************************/\nInt GetPredAdvBy1x0(\n    UChar *prev,        /* i */\n    UChar *rec,     /* i */\n    Int lx,     /* i */\n    Int rnd1 /* i */\n)\n{\n    Int i;      /* loop variable */\n    Int offset;\n    ULong  word1, word2, word3, word12, word22;\n    Int tmp;\n    ULong mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = lx - B_SIZE; /* offset for prev */\n\n    /* Branch based on pixel location (half-pel or full-pel) for x and y */\n    rec -= 12; /* preset */\n\n    tmp = (ULong)prev & 3;\n    mask = 254;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0xFEFEFEFE */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        prev -= 4;\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word1 = *((ULong*)(prev += 4));\n                word2 = *((ULong*)(prev + lx));\n                word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1;\n                word1 = *((ULong*)(prev += 4));\n                word2 = *((ULong*)(prev + lx));\n                word3 = word1 | word2; // rnd1 = 1; otherwise word3 = word1&word2\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n\n                prev += offset;\n            }\n            return 1;\n        }\n        else   /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 
0; i--)\n            {\n                word1 = *((ULong*)(prev += 4));\n                word2 = *((ULong*)(prev + lx));\n                word3 = word1 & word2;  /* rnd1 = 0; */\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 12)) = word1;\n                word1 = *((ULong*)(prev += 4));\n                word2 = *((ULong*)(prev + lx));\n                word3 = word1 & word2;  /* rnd1 = 0; */\n                word1 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word2 &= mask;\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                word12 >>= 8; /* 0 b4 b3 b2 */\n                word22 >>= 8;\n                word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */\n                word22 = word22 | (word2 << 24);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read 
b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 8; /* 0 b8 b7 b6 */\n                word2 >>= 8;\n                word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */\n                word2 = word2 | (word22 << 24);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                word12 >>= 8; /* 0 b4 b3 b2 */\n                word22 >>= 8;\n                word12 = word12 | (word1 << 24); /* b5 b4 b3 b2 */\n                word22 = word22 | (word2 << 24);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 8; /* 0 b8 b7 b6 */\n                word2 >>= 8;\n                word1 = word1 | (word12 << 24); /* b9 b8 b7 b6 */\n                word2 = word2 | (word22 << 24);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= 
(~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                word12 >>= 16; /* 0 0 b4 b3 */\n                word22 >>= 16;\n                word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */\n                word22 = word22 | (word2 << 16);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 16; /* 0 0 b8 b7 */\n                word2 >>= 16;\n                word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */\n                word2 = word2 | (word22 << 16);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        
{\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                word12 >>= 16; /* 0 0 b4 b3 */\n                word22 >>= 16;\n                word12 = word12 | (word1 << 16); /* b6 b5 b4 b3 */\n                word22 = word22 | (word2 << 16);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 16; /* 0 0 b8 b7 */\n                word2 >>= 16;\n                word1 = word1 | (word12 << 16); /* b10 b9 b8 b7 */\n                word2 = word2 | (word22 << 16);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n\n            return 1;\n        }\n    }\n    else /* tmp == 3 */\n    {\n        prev -= 3; /* word-aligned */\n        if (rnd1 == 1)\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                
word12 >>= 24; /* 0 0 0 b4 */\n                word22 >>= 24;\n                word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */\n                word22 = word22 | (word2 << 8);\n                word3 = word12 | word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n                word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 24; /* 0 0 0 b8 */\n                word2 >>= 24;\n                word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */\n                word2 = word2 | (word22 << 8);\n                word3 = word1 | word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        }\n        else /* rnd1 = 0 */\n        {\n            for (i = B_SIZE; i > 0; i--)\n            {\n                word12 = *((ULong*)prev); /* read b4 b3 b2 b1 */\n                word22 = *((ULong*)(prev + lx));\n\n                word1 = *((ULong*)(prev += 4)); /* read b8 b7 b6 b5 */\n                word2 = *((ULong*)(prev + lx));\n                word12 >>= 24; /* 0 0 0 b4 */\n                word22 >>= 24;\n                word12 = word12 | (word1 << 8); /* b7 b6 b5 b4 */\n                word22 = word22 | (word2 << 8);\n                word3 = word12 & word22;\n                word12 &= mask;\n                word22 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word12 >>= 1;\n            
    word12 = word12 + (word22 >> 1);\n                word12 += word3;\n                *((ULong*)(rec += 12)) = word12;\n\n                word12 = *((ULong*)(prev += 4)); /* read b12 b11 b10 b9 */\n                word22 = *((ULong*)(prev + lx));\n                word1 >>= 24; /* 0 0 0 b8 */\n                word2 >>= 24;\n                word1 = word1 | (word12 << 8); /* b11 b10 b9 b8 */\n                word2 = word2 | (word22 << 8);\n                word3 = word1 & word2;\n                word1 &= mask;\n                word2 &= mask;\n                word3 &= (~mask); /* 0x1010101, check last bit */\n                word1 >>= 1;\n                word1 = word1 + (word2 >> 1);\n                word1 += word3;\n                *((ULong*)(rec += 4)) = word1;\n                prev += offset;\n            }\n            return 1;\n        } /* rnd */\n    } /* tmp */\n}\n\n/**********************************************************************************/\nInt GetPredAdvBy1x1(\n    UChar *prev,        /* i */\n    UChar *rec,     /* i */\n    Int lx,     /* i */\n    Int rnd1 /* i */\n)\n{\n    Int i;      /* loop variable */\n    Int offset;\n    ULong  x1, x2, x1m, x2m, y1, y2, y1m, y2m; /* new way */\n    Int tmp;\n    Int rnd2;\n    ULong mask;\n\n    /* initialize offset to adjust pixel counter */\n    /*    the next row; full-pel resolution      */\n    offset = lx - B_SIZE; /* offset for prev */\n\n    rnd2 = rnd1 + 1;\n    rnd2 |= (rnd2 << 8);\n    rnd2 |= (rnd2 << 16);\n\n    mask = 0x3F;\n    mask |= (mask << 8);\n    mask |= (mask << 16); /* 0x3f3f3f3f */\n\n    tmp = (ULong)prev & 3;\n\n    rec -= 4; /* preset */\n\n    if (tmp == 0) /* word-aligned */\n    {\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */\n            y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((ULong*)(prev + lx)); /* b7 b6 
b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            y2m = x1m >> 8;\n            y2 = x1 >> 8;\n            y2m |= (y1m << 24);  /* a4+b4, a3+b3, a2+b2, a1+b1 */\n            y2 |= (y1 << 24);\n            x1m += y2m;  /* a3+b3+a4+b4, ....., a0+b0+a1+b1 */\n            x1 += y2;\n            x1 += rnd2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((ULong*)(rec += 4)) = x1m; /* save x1m */\n\n            y2m = y1m >> 8;\n            y2 = y1 >> 8;\n            y2m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */\n            y2 |= (x2 << 24);\n            y1m += y2m;  /* a7+b7+a8+b8, ....., a4+b4+a5+b5 */\n            y1 += y2;\n            y1 += rnd2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((ULong*)(rec += 4)) = y1m; /* save y1m */\n\n            rec += 8;\n            prev += offset;\n        }\n\n        
return 1;\n    }\n    else if (tmp == 1)\n    {\n        prev--; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */\n            y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 8 ;\n            x1 >>= 8;\n            x1m |= (y1m << 24);  /* a4+b4, a3+b3, a2+b2, a1+b1 */\n            x1 |= (y1 << 24);\n            y2m = (y1m << 16);\n            y2 = (y1 << 16);\n            y2m |= (x1m >> 8); /* a5+b5, a4+b4, a3+b3, a2+b2 */\n            y2 |= (x1 >> 8);\n            x1 += rnd2;\n            x1m += y2m;  /* a4+b4+a5+b5, ....., a1+b1+a2+b2 */\n            x1 += y2;\n            x1 &= (mask << 2);\n            x1m += (x1 
>> 2);\n            *((ULong*)(rec += 4)) = x1m; /* save x1m */\n\n            y1m >>= 8;\n            y1 >>= 8;\n            y1m |= (x2m << 24); /* a8+b8, a7+b7, a6+b6, a5+b5 */\n            y1 |= (x2 << 24);\n            y2m = (x2m << 16);\n            y2 = (x2 << 16);\n            y2m |= (y1m >> 8); /*  a9+b9, a8+b8, a7+b7, a6+b6,*/\n            y2 |= (y1 >> 8);\n            y1 += rnd2;\n            y1m += y2m;  /* a8+b8+a9+b9, ....., a5+b5+a6+b6 */\n            y1 += y2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((ULong*)(rec += 4)) = y1m; /* save y1m */\n\n            rec += 8;\n            prev += offset;\n        }\n        return 1;\n    }\n    else if (tmp == 2)\n    {\n        prev -= 2; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */\n            y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, 
y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 16 ;\n            x1 >>= 16;\n            x1m |= (y1m << 16);  /* a5+b5, a4+b4, a3+b3, a2+b2 */\n            x1 |= (y1 << 16);\n            y2m = (y1m << 8);\n            y2 = (y1 << 8);\n            y2m |= (x1m >> 8); /* a6+b6, a5+b5, a4+b4, a3+b3 */\n            y2 |= (x1 >> 8);\n            x1 += rnd2;\n            x1m += y2m;  /* a5+b5+a6+b6, ....., a2+b2+a3+b3 */\n            x1 += y2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((ULong*)(rec += 4)) = x1m; /* save x1m */\n\n            y1m >>= 16;\n            y1 >>= 16;\n            y1m |= (x2m << 16); /* a9+b9, a8+b8, a7+b7, a6+b6 */\n            y1 |= (x2 << 16);\n            y2m = (x2m << 8);\n            y2 = (x2 << 8);\n            y2m |= (y1m >> 8); /*  a10+b10, a9+b9, a8+b8, a7+b7,*/\n            y2 |= (y1 >> 8);\n            y1 += rnd2;\n            y1m += y2m;  /* a9+b9+a10+b10, ....., a6+b6+a7+b7 */\n            y1 += y2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((ULong*)(rec += 4)) = y1m; /* save y1m */\n\n            rec += 8;\n            prev += offset;\n        }\n        return 1;\n    }\n    else /* tmp == 3 */\n    {\n        prev -= 3; /* to word-aligned */\n        for (i = B_SIZE; i > 0; i--)\n        {\n            x1 = *((ULong*)prev); /* load a3 a2 a1 a0 */\n            x2 = *((ULong*)(prev + lx)); /* load b3 b2 b1 b0, another line */\n            y1 = *((ULong*)(prev += 4)); /* a7 a6 a5 a4 */\n            y2 = *((ULong*)(prev + lx)); /* b7 b6 b5 b4 */\n\n            x1m = (x1 >> 2) & mask; /* zero out last 2 bits */\n            x2m = (x2 >> 2) & mask;\n            x1 = x1 ^(x1m << 2);\n            x2 = x2 ^(x2m << 2);\n            x1m += x2m;\n            x1 += x2;\n\n            /* x2m, x2 
free */\n            y1m = (y1 >> 2) & mask; /* zero out last 2 bits */\n            y2m = (y2 >> 2) & mask;\n            y1 = y1 ^(y1m << 2);\n            y2 = y2 ^(y2m << 2);\n            y1m += y2m;\n            y1 += y2;\n\n            /* y2m, y2 free */\n            /* x2m, x2 free */\n            x2 = *((ULong*)(prev += 4)); /* a11 a10 a9 a8 */\n            y2 = *((ULong*)(prev + lx)); /* b11 b10 b9 b8 */\n            x2m = (x2 >> 2) & mask;\n            y2m = (y2 >> 2) & mask;\n            x2 = x2 ^(x2m << 2);\n            y2 = y2 ^(y2m << 2);\n            x2m += y2m;\n            x2 += y2;\n            /* y2m, y2 free */\n\n            /* now operate on x1m, x1, y1m, y1, x2m, x2 */\n            /* x1m = a3+b3, a2+b2, a1+b1, a0+b0 */\n            /* y1m = a7+b7, a6+b6, a5+b5, a4+b4 */\n            /* x2m = a11+b11, a10+b10, a9+b9, a8+b8 */\n            /* x1, y1, x2 */\n\n            x1m >>= 24 ;\n            x1 >>= 24;\n            x1m |= (y1m << 8);  /* a6+b6, a5+b5, a4+b4, a3+b3 */\n            x1 |= (y1 << 8);\n\n            x1m += y1m;  /* a6+b6+a7+b7, ....., a3+b3+a4+b4 */\n            x1 += y1;\n            x1 += rnd2;\n            x1 &= (mask << 2);\n            x1m += (x1 >> 2);\n            *((ULong*)(rec += 4)) = x1m; /* save x1m */\n\n            y1m >>= 24;\n            y1 >>= 24;\n            y1m |= (x2m << 8); /* a10+b10, a9+b9, a8+b8, a7+b7 */\n            y1 |= (x2 << 8);\n            y1m += x2m;  /* a10+b10+a11+b11, ....., a7+b7+a8+b8 */\n            y1 += x2;\n            y1 += rnd2;\n            y1 &= (mask << 2);\n            y1m += (y1 >> 2);\n            *((ULong*)(rec += 4)) = y1m; /* save y1m */\n\n            rec += 8;\n            prev += offset;\n        }\n        return 1;\n    }\n}\n\n\n/*=============================================================================\n    Function:   EncGetPredOutside\n    Date:       04/17/2001\n    Purpose:    - modified from GetPredOutside in the decoder.\n    Modified:    09/24/05\n           
     use the existing non-initialized padded region\n=============================================================================*/\n// not really needed since padding is included\n#define PAD_CORNER  { temp = *src; \\\n                     temp |= (temp<<8); \\\n                     temp |= (temp<<16); \\\n                     *((ULong*)dst) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                     *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; }\n\n#define PAD_ROW     { temp = *((ULong*)src); \\\n                      temp2 = *((ULong*)(src+4)); \\\n                      *((ULong*)dst) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) 
= temp2; \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp2; }\n\n#define PAD_COL     { temp = *src;   temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)dst) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                     *((ULong*)(dst+4)) = temp; \\\n                      temp = *(src+=lx);     temp |= (temp<<8);  temp |= (temp<<16); \\\n                      *((ULong*)(dst+=lx)) = temp; \\\n                      *((ULong*)(dst+4)) = temp; }\n\n\nInt EncGetPredOutside(Int xpos, Int ypos, UChar *c_prev, UChar *rec,\n                      Int width, Int height, Int rnd1)\n{\n    Int lx;\n    UChar *src, *dst;\n    ULong temp, temp2;\n    Int xoffset;\n\n    lx = width + 16; /* only works for chroma */\n\n    if (xpos < 0)\n    {\n        if (ypos < 0) /* pad 
top-left */\n        {\n            /* pad corner */\n            src = c_prev;\n            dst = c_prev - (lx << 3) - 8;\n            PAD_CORNER\n\n            /* pad top */\n            dst = c_prev - (lx << 3);\n            PAD_ROW\n\n            /* pad left */\n            dst = c_prev - 8;\n            PAD_COL\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n        else if ((ypos >> 1) < (height - 8)) /* pad left of frame */\n        {\n            /* pad left */\n            src = c_prev + (ypos >> 1) * lx;\n            dst = src - 8;\n            PAD_COL\n            /* pad extra row */\n            temp = *(src += lx);\n            temp |= (temp << 8);\n            temp |= (temp << 16);\n            *((ULong*)(dst += lx)) = temp;\n            *((ULong*)(dst + 4)) = temp;\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n        else /* pad bottom-left */\n        {\n            /* pad corner */\n            src = c_prev + (height - 1) * lx;\n            dst = src + lx - 8;\n            PAD_CORNER\n\n            /* pad bottom */\n            dst = src + lx;\n            PAD_ROW\n\n            /* pad left */\n            src -= (lx << 3);\n            src += lx;\n            dst = src - 8;\n            PAD_COL\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n    }\n    else if ((xpos >> 1) < (width - 8))\n    {\n        if (ypos < 0) /* pad top of frame */\n        {\n            xoffset = (xpos >> 1) & 0x3;\n            src = c_prev + (xpos >> 1) - xoffset;\n            dst = src - (lx << 3);\n            PAD_ROW\n            if (xoffset || 
(xpos&1))\n            {\n                temp = *((ULong*)(src + 8));\n                dst = src - (lx << 3) + 8;\n                *((ULong*)dst) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n            }\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n        else /* pad bottom of frame */\n        {\n            xoffset = (xpos >> 1) & 0x3;\n            src = c_prev + (xpos >> 1) - xoffset + (height - 1) * lx;\n            dst = src + lx;\n            PAD_ROW\n            if (xoffset || (xpos&1))\n            {\n                temp = *((ULong*)(src + 8));\n                dst = src + lx + 8;\n                *((ULong*)dst) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n                *((ULong*)(dst += lx)) = temp;\n            }\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n    }\n    else\n    {\n        if (ypos < 0) /* pad top-right */\n        {\n            /* pad corner */\n            src = c_prev + width - 1;\n            dst = src - (lx << 3) + 1;\n            PAD_CORNER\n\n            /* pad top */\n            src -= 7;\n            dst = src - (lx << 3);\n            PAD_ROW\n\n            /* pad left */\n            src += 7;\n     
       dst = src + 1;\n            PAD_COL\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n        else if ((ypos >> 1) < (height - B_SIZE)) /* pad right of frame */\n        {\n            /* pad left */\n            src = c_prev + (ypos >> 1) * lx + width - 1;\n            dst = src + 1;\n            PAD_COL\n            /* pad extra row */\n            temp = *(src += lx);\n            temp |= (temp << 8);\n            temp |= (temp << 16);\n            *((ULong*)(dst += lx)) = temp;\n            *((ULong*)(dst + 4)) = temp;\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n        else /* pad bottom-right */\n        {\n            /* pad left */\n            src = c_prev + (height - 8) * lx + width - 1;\n            dst = src + 1;\n            PAD_COL\n\n            /* pad corner */\n            dst = src + lx + 1;\n            PAD_CORNER\n\n            /* pad bottom */\n            src -= 7;\n            dst = src + lx;\n            PAD_ROW\n\n            GetPredAdvBTable[ypos&1][xpos&1](c_prev + (xpos >> 1) + ((ypos >> 1)*lx),\n                                             rec, lx, rnd1);\n\n            return 1;\n        }\n    }\n}\n\n/* ====================================================================== /\n    Function : Copy_MB_from_Vop()\n    Date     : 04/17/2001\n ====================================================================== */\n\nvoid Copy_MB_from_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int pitch)\n{\n    Int row, col, i;\n    Int *src1, *src2;\n    Int offset = pitch - MB_SIZE;\n    ULong temp;\n\n    for (i = 0; i < 4; i += 2)\n    {\n        src1 = yChan[i];\n        src2 = yChan[i+1];\n\n        row = B_SIZE;\n        while (row--)\n        {\n            col 
= B_SIZE;\n            while (col)\n            {\n                temp = *((ULong*)comp);\n                *src1++ = (Int)(temp & 0xFF);\n                *src1++ = (Int)((temp >> 8) & 0xFF);\n                *src1++ = (Int)((temp >> 16) & 0xFF);\n                *src1++ = (Int)((temp >> 24) & 0xFF);\n                comp += 4;\n                col -= 4;\n            }\n            col = B_SIZE;\n            while (col)\n            {\n                temp = *((ULong*)comp);\n                *src2++ = (Int)(temp & 0xFF);\n                *src2++ = (Int)((temp >> 8) & 0xFF);\n                *src2++ = (Int)((temp >> 16) & 0xFF);\n                *src2++ = (Int)((temp >> 24) & 0xFF);\n                comp += 4;\n                col -= 4;\n            }\n            comp += offset;\n        }\n    }\n    return ;\n}\n\n/* ====================================================================== /\n    Function : Copy_B_from_Vop()\n    Date     : 04/17/2001\n/ ====================================================================== */\n\nvoid Copy_B_from_Vop(UChar *comp, Int cChan[], Int pitch)\n{\n    Int row, col;\n    Int offset = pitch - B_SIZE;\n    ULong temp;\n\n    row = B_SIZE;\n    while (row--)\n    {\n        col = B_SIZE;\n        while (col)\n        {\n            temp = *((ULong*)comp);\n            *cChan++ = (Int)(temp & 0xFF);\n            *cChan++ = (Int)((temp >> 8) & 0xFF);\n            *cChan++ = (Int)((temp >> 16) & 0xFF);\n            *cChan++ = (Int)((temp >> 24) & 0xFF);\n            comp += 4;\n            col -= 4;\n        }\n        comp += offset;\n    }\n}\n\n/* ====================================================================== /\n    Function : Copy_MB_into_Vop()\n    Date     : 04/17/2001\n    History  : From decoder\n/ ====================================================================== */\n\nvoid Copy_MB_into_Vop(UChar *comp, Int yChan[][NCOEFF_BLOCK], Int pitch)\n{\n    Int row, col, i;\n    Int *src1, *src2;\n    Int offset = 
pitch - MB_SIZE;\n    UChar mask = 0xFF;\n    Int tmp;\n    ULong temp;\n\n    for (i = 0; i < 4; i += 2)\n    {\n        src1 = yChan[i];\n        src2 = yChan[i+1];\n\n        row = B_SIZE;\n        while (row--)\n        {\n            col = B_SIZE;\n            while (col)\n            {\n                tmp = (*src1++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp = tmp << 24;\n                tmp = (*src1++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= (tmp << 16);\n                tmp = (*src1++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= (tmp << 8);\n                tmp = (*src1++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= tmp;\n                *((ULong*)comp) = temp;\n                comp += 4;\n                col -= 4;\n            }\n            col = B_SIZE;\n            while (col)\n            {\n                tmp = (*src2++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp = tmp << 24;\n                tmp = (*src2++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= (tmp << 16);\n                tmp = (*src2++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= (tmp << 8);\n                tmp = (*src2++);\n                if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n                temp |= tmp;\n                *((ULong*)comp) = temp;\n                comp += 4;\n                col -= 4;\n            }\n            comp += offset;\n        }\n    }\n    return ;\n}\n\n\n/* ====================================================================== /\n    Function : Copy_B_into_Vop()\n    Date     : 04/17/2001\n    History  : From decoder\n/ ====================================================================== */\n\nvoid 
Copy_B_into_Vop(UChar *comp, Int cChan[], Int pitch)\n{\n    Int row, col;\n    Int offset = pitch - B_SIZE;\n    Int tmp;\n    UChar mask = 0xFF;\n    ULong temp;\n\n    row = B_SIZE;\n    while (row--)\n    {\n        col = B_SIZE;\n        while (col)\n        {\n            tmp = (*cChan++);\n            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n            temp = tmp << 24;\n            tmp = (*cChan++);\n            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n            temp |= (tmp << 16);\n            tmp = (*cChan++);\n            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n            temp |= (tmp << 8);\n            tmp = (*cChan++);\n            if ((UInt)tmp > mask) tmp = mask & (~(tmp >> 31));\n            temp |= tmp;\n            *((ULong*)comp) = temp;\n            comp += 4;\n            col -= 4;\n        }\n        comp += offset;\n    }\n}\n\n/* ======================================================================== */\n/*  Function : get_MB( )                                                    */\n/*  Date     : 10/03/2000                                                   */\n/*  Purpose  : Copy 4 Y to reference frame                                  */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid get_MB(UChar *c_prev, UChar *c_prev_u  , UChar *c_prev_v,\n            Short mb[6][64], Int lx, Int lx_uv)\n\n{\n    Int i, j, count = 0, count1 = 0;\n    Int k1 = lx - MB_SIZE, k2 = lx_uv - B_SIZE;\n\n    for (i = 0; i < B_SIZE; i++)\n    {\n        for (j = 0; j < B_SIZE; j++)\n        {\n            mb[0][count] = (Int)(*c_prev++);\n            mb[4][count] = (Int)(*c_prev_u++);\n            mb[5][count++] = (Int)(*c_prev_v++);\n        }\n\n   
     for (j = 0; j < B_SIZE; j++)\n            mb[1][count1++] = (Int)(*c_prev++);\n\n        c_prev += k1;\n        c_prev_u += k2;\n        c_prev_v += k2;\n\n\n    }\n\n    count = count1 = 0;\n    for (i = 0; i < B_SIZE; i++)\n    {\n        for (j = 0; j < B_SIZE; j++)\n            mb[2][count++] = (Int)(*c_prev++);\n\n        for (j = 0; j < B_SIZE; j++)\n            mb[3][count1++] = (Int)(*c_prev++);\n\n        c_prev += k1;\n    }\n}\n\nvoid PutSkippedBlock(UChar *rec, UChar *prev, Int lx)\n{\n    UChar *end;\n    Int offset = (lx - 8) >> 2;\n    Int *src, *dst;\n\n    dst = (Int*)rec;\n    src = (Int*)prev;\n\n    end = prev + (lx << 3);\n\n    do\n    {\n        *dst++ = *src++;\n        *dst++ = *src++;\n        dst += offset;\n        src += offset;\n    }\n    while ((UInt)src < (UInt)end);\n\n    return ;\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/motion_est.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4enc_lib.h\"\n#include \"mp4lib_int.h\"\n#include \"m4venc_oscl.h\"\n\n//#define PRINT_MV\n#define MIN_GOP 1   /* minimum size of GOP,  1/23/01, need to be tested */\n\n#define CANDIDATE_DISTANCE  0 /* distance candidate from one another to consider as a distinct one */\n/* shouldn't be more than 3 */\n\n#define ZERO_MV_PREF    0 /* 0: bias (0,0)MV before full-pel search, lowest complexity*/\n/* 1: bias (0,0)MV after full-pel search, before half-pel, highest comp */\n/* 2: bias (0,0)MV after half-pel, high comp, better PSNR */\n\n#define RASTER_REFRESH  /* instead of random INTRA refresh, do raster scan,  2/26/01 */\n\n#ifdef RASTER_REFRESH\n#define TARGET_REFRESH_PER_REGION 4 /* , no. MB per frame to be INTRA refreshed */\n#else\n#define TARGET_REFRESH_PER_REGION 1 /* , no. 
MB per region to be INTRA refreshed */\n#endif\n\n#define ALL_CAND_EQUAL  10  /*  any number greater than 5 will work */\n\n#define NumPixelMB  256     /*  number of pixels used in SAD calculation */\n\n#define DEF_8X8_WIN 3   /* search region for 8x8 MVs around the 16x16 MV */\n#define MB_Nb  256\n\n#define PREF_NULL_VEC 129   /* for zero vector bias */\n#define PREF_16_VEC 129     /* 1MV bias versus 4MVs*/\n#define PREF_INTRA  512     /* bias for INTRA coding */\n\nconst static Int tab_exclude[9][9] =  // [last_loc][curr_loc]\n{\n    {0, 0, 0, 0, 0, 0, 0, 0, 0},\n    {0, 0, 0, 0, 1, 1, 1, 0, 0},\n    {0, 0, 0, 0, 1, 1, 1, 1, 1},\n    {0, 0, 0, 0, 0, 0, 1, 1, 1},\n    {0, 1, 1, 0, 0, 0, 1, 1, 1},\n    {0, 1, 1, 0, 0, 0, 0, 0, 1},\n    {0, 1, 1, 1, 1, 0, 0, 0, 1},\n    {0, 0, 1, 1, 1, 0, 0, 0, 0},\n    {0, 0, 1, 1, 1, 1, 1, 0, 0}\n}; //to decide whether to continue or compute\n\nconst static Int refine_next[8][2] =    /* [curr_k][increment] */\n{\n    {0, 0}, {2, 0}, {1, 1}, {0, 2}, { -1, 1}, { -2, 0}, { -1, -1}, {0, -2}\n};\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    void MBMotionSearch(VideoEncData *video, UChar *cur, UChar *best_cand[],\n    Int i0, Int j0, Int type_pred, Int fullsearch, Int *hp_guess);\n\n    Int  fullsearch(VideoEncData *video, Vol *currVol, UChar *ref, UChar *cur,\n                    Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh);\n    Int fullsearchBlk(VideoEncData *video, Vol *currVol, UChar *cent, UChar *cur,\n                      Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh, Int range);\n    void CandidateSelection(Int *mvx, Int *mvy, Int *num_can, Int imb, Int jmb,\n                            VideoEncData *video, Int type_pred);\n    void RasterIntraUpdate(UChar *intraArray, UChar *Mode, Int totalMB, Int numRefresh);\n    void ResetIntraUpdate(UChar *intraArray, Int totalMB);\n    void ResetIntraUpdateRegion(UChar *intraArray, Int start_i, Int rwidth,\n                                Int 
start_j, Int rheight, Int mbwidth, Int mbheight);\n\n    void MoveNeighborSAD(Int dn[], Int new_loc);\n    Int FindMin(Int dn[]);\n    void PrepareCurMB(VideoEncData *video, UChar *cur);\n\n#ifdef __cplusplus\n}\n#endif\n\n/***************************************/\n/*  2/28/01, for HYPOTHESIS TESTING */\n#ifdef HTFM     /* defined in mp4def.h */\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    void CalcThreshold(double pf, double exp_lamda[], Int nrmlz_th[]);\n    void    HTFMPrepareCurMB(VideoEncData *video, HTFM_Stat *htfm_stat, UChar *cur);\n#ifdef __cplusplus\n}\n#endif\n\n\n#define HTFM_Pf  0.25   /* 3/2/1, probability of false alarm, can be varied from 0 to 0.5 */\n/***************************************/\n#endif\n\n#ifdef _SAD_STAT\nULong num_MB = 0;\nULong num_HP_MB = 0;\nULong num_Blk = 0;\nULong num_HP_Blk = 0;\nULong num_cand = 0;\nULong num_better_hp = 0;\nULong i_dist_from_guess = 0;\nULong j_dist_from_guess = 0;\nULong num_hp_not_zero = 0;\n#endif\n\n\n\n/*==================================================================\n    Function:   MotionEstimation\n    Date:       10/3/2000\n    Purpose:    Go through all macroblock for motion search and\n                determine scene change detection.\n====================================================================*/\n\nvoid MotionEstimation(VideoEncData *video)\n{\n    UChar use_4mv = video->encParams->MV8x8_Enabled;\n    Vol *currVol = video->vol[video->currLayer];\n    Vop *currVop = video->currVop;\n    VideoEncFrameIO *currFrame = video->input;\n    Int i, j, comp;\n    Int mbwidth = currVol->nMBPerRow;\n    Int mbheight = currVol->nMBPerCol;\n    Int totalMB = currVol->nTotalMB;\n    Int width = currFrame->pitch;\n    UChar *mode_mb, *Mode = video->headerInfo.Mode;\n    MOT *mot_mb, **mot = video->mot;\n    UChar *intraArray = video->intraArray;\n    Int FS_en = video->encParams->FullSearch_Enabled;\n    void (*ComputeMBSum)(UChar *, Int, MOT *) = video->functionPointer->ComputeMBSum;\n    void 
(*ChooseMode)(UChar*, UChar*, Int, Int) = video->functionPointer->ChooseMode;\n\n    Int numIntra, start_i, numLoop, incr_i;\n    Int mbnum, offset;\n    UChar *cur, *best_cand[5];\n    Int sad8 = 0, sad16 = 0;\n    Int totalSAD = 0;   /* average SAD for rate control */\n    Int skip_halfpel_4mv;\n    Int f_code_p, f_code_n, max_mag = 0, min_mag = 0;\n    Int type_pred;\n    Int xh[5] = {0, 0, 0, 0, 0};\n    Int yh[5] = {0, 0, 0, 0, 0}; /* half-pel */\n    UChar hp_mem4MV[17*17*4];\n\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */\n    Int collect = 0;\n    HTFM_Stat htfm_stat;\n    double newvar[16];\n    double exp_lamda[15];\n    /*********************************/\n#endif\n    Int hp_guess = 0;\n#ifdef PRINT_MV\n    FILE *fp_debug;\n#endif\n\n//  FILE *fstat;\n//  static int frame_num = 0;\n\n    offset = 0;\n\n    if (video->currVop->predictionType == I_VOP)\n    {   /* compute the SAV */\n        mbnum = 0;\n        cur = currFrame->yChan;\n\n        for (j = 0; j < mbheight; j++)\n        {\n            for (i = 0; i < mbwidth; i++)\n            {\n                video->mbnum = mbnum;\n                mot_mb = mot[mbnum];\n\n                (*ComputeMBSum)(cur + (i << 4), width, mot_mb);\n\n                totalSAD += mot_mb[0].sad;\n\n                mbnum++;\n            }\n            cur += (width << 4);\n        }\n\n        video->sumMAD = (float)totalSAD / (float)NumPixelMB;\n\n        ResetIntraUpdate(intraArray, totalMB);\n\n        return  ;\n    }\n\n    /* 09/20/05 */\n    if (video->prevBaseVop->padded == 0 && !video->encParams->H263_Enabled)\n    {\n        PaddingEdge(video->prevBaseVop);\n        video->prevBaseVop->padded = 1;\n    }\n\n    /* Random INTRA update */\n    /*  suggest to do it in CodeMB */\n    /*  2/21/2001 */\n    //if(video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2)\n    if (video->currLayer == 0 && video->encParams->Refresh)\n    {\n        RasterIntraUpdate(intraArray, Mode, 
totalMB, video->encParams->Refresh);\n    }\n\n    video->sad_extra_info = NULL;\n\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */\n    InitHTFM(video, &htfm_stat, newvar, &collect);\n    /*********************************/\n#endif\n\n    if ((video->encParams->SceneChange_Det == 1) /*&& video->currLayer==0 */\n            && ((video->encParams->LayerFrameRate[0] < 5.0) || (video->numVopsInGOP > MIN_GOP)))\n        /* do not try to detect a new scene if low frame rate and too close to previous I-frame */\n    {\n        incr_i = 2;\n        numLoop = 2;\n        start_i = 1;\n        type_pred = 0; /* for initial candidate selection */\n    }\n    else\n    {\n        incr_i = 1;\n        numLoop = 1;\n        start_i = 0;\n        type_pred = 2;\n    }\n\n    /* First pass, loop thru half the macroblock */\n    /* determine scene change */\n    /* Second pass, for the rest of macroblocks */\n    numIntra = 0;\n    while (numLoop--)\n    {\n        for (j = 0; j < mbheight; j++)\n        {\n            if (incr_i > 1)\n                start_i = (start_i == 0 ? 
1 : 0) ; /* toggle 0 and 1 */\n\n            offset = width * (j << 4) + (start_i << 4);\n\n            mbnum = j * mbwidth + start_i;\n\n            for (i = start_i; i < mbwidth; i += incr_i)\n            {\n                video->mbnum = mbnum;\n                mot_mb = mot[mbnum];\n                mode_mb = Mode + mbnum;\n\n                cur = currFrame->yChan + offset;\n\n\n                if (*mode_mb != MODE_INTRA)\n                {\n#if defined(HTFM)\n                    HTFMPrepareCurMB(video, &htfm_stat, cur);\n#else\n                    PrepareCurMB(video, cur);\n#endif\n                    /************************************************************/\n                    /******** full-pel 1MV and 4MVs search **********************/\n\n#ifdef _SAD_STAT\n                    num_MB++;\n#endif\n                    MBMotionSearch(video, cur, best_cand, i << 4, j << 4, type_pred,\n                                   FS_en, &hp_guess);\n\n#ifdef PRINT_MV\n                    fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                    fprintf(fp_debug, \"#%d (%d,%d,%d) : \", mbnum, mot_mb[0].x, mot_mb[0].y, mot_mb[0].sad);\n                    fprintf(fp_debug, \"(%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : ==>\\n\",\n                            mot_mb[1].x, mot_mb[1].y, mot_mb[1].sad,\n                            mot_mb[2].x, mot_mb[2].y, mot_mb[2].sad,\n                            mot_mb[3].x, mot_mb[3].y, mot_mb[3].sad,\n                            mot_mb[4].x, mot_mb[4].y, mot_mb[4].sad);\n                    fclose(fp_debug);\n#endif\n                    sad16 = mot_mb[0].sad;\n#ifdef NO_INTER4V\n                    sad8 = sad16;\n#else\n                    sad8 = mot_mb[1].sad + mot_mb[2].sad + mot_mb[3].sad + mot_mb[4].sad;\n#endif\n\n                    /* choose between INTRA or INTER */\n                    (*ChooseMode)(mode_mb, cur, width, ((sad8 < sad16) ? 
sad8 : sad16));\n                }\n                else    /* INTRA update, use for prediction 3/23/01 */\n                {\n                    mot_mb[0].x = mot_mb[0].y = 0;\n                }\n\n                if (*mode_mb == MODE_INTRA)\n                {\n                    numIntra++ ;\n\n                    /* compute SAV for rate control and fast DCT, 11/28/00 */\n                    (*ComputeMBSum)(cur, width, mot_mb);\n\n                    /* leave mot_mb[0] as it is for fast motion search */\n                    /* set the 4 MVs to zeros */\n                    for (comp = 1; comp <= 4; comp++)\n                    {\n                        mot_mb[comp].x = 0;\n                        mot_mb[comp].y = 0;\n                    }\n#ifdef PRINT_MV\n                    fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                    fprintf(fp_debug, \"\\n\");\n                    fclose(fp_debug);\n#endif\n                }\n                else /* *mode_mb = MODE_INTER;*/\n                {\n                    if (video->encParams->HalfPel_Enabled)\n                    {\n#ifdef _SAD_STAT\n                        num_HP_MB++;\n#endif\n                        /* find half-pel resolution motion vector */\n                        FindHalfPelMB(video, cur, mot_mb, best_cand[0],\n                                      i << 4, j << 4, xh, yh, hp_guess);\n#ifdef PRINT_MV\n                        fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                        fprintf(fp_debug, \"(%d,%d), %d\\n\", mot_mb[0].x, mot_mb[0].y, mot_mb[0].sad);\n                        fclose(fp_debug);\n#endif\n                        skip_halfpel_4mv = ((sad16 - mot_mb[0].sad) <= (MB_Nb >> 1) + 1);\n                        sad16 = mot_mb[0].sad;\n\n#ifndef NO_INTER4V\n                        if (use_4mv && !skip_halfpel_4mv)\n                        {\n                            /* Also decide 1MV or 4MV !!!!!!!!*/\n                      
      sad8 = FindHalfPelBlk(video, cur, mot_mb, sad16,\n                                                  best_cand, mode_mb, i << 4, j << 4, xh, yh, hp_mem4MV);\n\n#ifdef PRINT_MV\n                            fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                            fprintf(fp_debug, \" (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) : (%d,%d,%d) \\n\",\n                                    mot_mb[1].x, mot_mb[1].y, mot_mb[1].sad,\n                                    mot_mb[2].x, mot_mb[2].y, mot_mb[2].sad,\n                                    mot_mb[3].x, mot_mb[3].y, mot_mb[3].sad,\n                                    mot_mb[4].x, mot_mb[4].y, mot_mb[4].sad);\n                            fclose(fp_debug);\n#endif\n                        }\n#endif /* NO_INTER4V */\n                    }\n                    else    /* HalfPel_Enabled ==0  */\n                    {\n#ifndef NO_INTER4V\n                        //if(sad16 < sad8-PREF_16_VEC)\n                        if (sad16 - PREF_16_VEC > sad8)\n                        {\n                            *mode_mb = MODE_INTER4V;\n                        }\n#endif\n                    }\n#if (ZERO_MV_PREF==2)   /* use mot_mb[7].sad as d0 computed in MBMotionSearch*/\n                    /******************************************************/\n                    if (mot_mb[7].sad - PREF_NULL_VEC < sad16 && mot_mb[7].sad - PREF_NULL_VEC < sad8)\n                    {\n                        mot_mb[0].sad = mot_mb[7].sad - PREF_NULL_VEC;\n                        mot_mb[0].x = mot_mb[0].y = 0;\n                        *mode_mb = MODE_INTER;\n                    }\n                    /******************************************************/\n#endif\n                    if (*mode_mb == MODE_INTER)\n                    {\n                        if (mot_mb[0].x == 0 && mot_mb[0].y == 0)   /* use zero vector */\n                            mot_mb[0].sad += PREF_NULL_VEC; /* add back the bias */\n\n    
                    mot_mb[1].sad = mot_mb[2].sad = mot_mb[3].sad = mot_mb[4].sad = (mot_mb[0].sad + 2) >> 2;\n                        mot_mb[1].x = mot_mb[2].x = mot_mb[3].x = mot_mb[4].x = mot_mb[0].x;\n                        mot_mb[1].y = mot_mb[2].y = mot_mb[3].y = mot_mb[4].y = mot_mb[0].y;\n\n                    }\n                }\n\n                /* find maximum magnitude */\n                /* compute average SAD for rate control, 11/28/00 */\n                if (*mode_mb == MODE_INTER)\n                {\n#ifdef PRINT_MV\n                    fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                    fprintf(fp_debug, \"%d MODE_INTER\\n\", mbnum);\n                    fclose(fp_debug);\n#endif\n                    totalSAD += mot_mb[0].sad;\n                    if (mot_mb[0].x > max_mag)\n                        max_mag = mot_mb[0].x;\n                    if (mot_mb[0].y > max_mag)\n                        max_mag = mot_mb[0].y;\n                    if (mot_mb[0].x < min_mag)\n                        min_mag = mot_mb[0].x;\n                    if (mot_mb[0].y < min_mag)\n                        min_mag = mot_mb[0].y;\n                }\n                else if (*mode_mb == MODE_INTER4V)\n                {\n#ifdef PRINT_MV\n                    fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                    fprintf(fp_debug, \"%d MODE_INTER4V\\n\", mbnum);\n                    fclose(fp_debug);\n#endif\n                    totalSAD += sad8;\n                    for (comp = 1; comp <= 4; comp++)\n                    {\n                        if (mot_mb[comp].x > max_mag)\n                            max_mag = mot_mb[comp].x;\n                        if (mot_mb[comp].y > max_mag)\n                            max_mag = mot_mb[comp].y;\n                        if (mot_mb[comp].x < min_mag)\n                            min_mag = mot_mb[comp].x;\n                        if (mot_mb[comp].y < min_mag)\n               
             min_mag = mot_mb[comp].y;\n                    }\n                }\n                else    /* MODE_INTRA */\n                {\n#ifdef PRINT_MV\n                    fp_debug = fopen(\"c:\\\\bitstream\\\\mv1_debug.txt\", \"a\");\n                    fprintf(fp_debug, \"%d MODE_INTRA\\n\", mbnum);\n                    fclose(fp_debug);\n#endif\n                    totalSAD += mot_mb[0].sad;\n                }\n                mbnum += incr_i;\n                offset += (incr_i << 4);\n\n            }\n        }\n\n        if (incr_i > 1 && numLoop) /* scene change on and first loop */\n        {\n            //if(numIntra > ((totalMB>>3)<<1) + (totalMB>>3)) /* 75% of 50%MBs */\n            if (numIntra > (0.30*(totalMB / 2.0))) /* 15% of 50%MBs */\n            {\n                /******** scene change detected *******************/\n                currVop->predictionType = I_VOP;\n                M4VENC_MEMSET(Mode, MODE_INTRA, sizeof(UChar)*totalMB); /* set this for MB level coding*/\n                currVop->quantizer = video->encParams->InitQuantIvop[video->currLayer];\n\n                /* compute the SAV for rate control & fast DCT */\n                totalSAD = 0;\n                offset = 0;\n                mbnum = 0;\n                cur = currFrame->yChan;\n\n                for (j = 0; j < mbheight; j++)\n                {\n                    for (i = 0; i < mbwidth; i++)\n                    {\n                        video->mbnum = mbnum;\n                        mot_mb = mot[mbnum];\n\n\n                        (*ComputeMBSum)(cur + (i << 4), width, mot_mb);\n                        totalSAD += mot_mb[0].sad;\n\n                        mbnum++;\n                    }\n                    cur += (width << 4);\n                }\n\n                video->sumMAD = (float)totalSAD / (float)NumPixelMB;\n                ResetIntraUpdate(intraArray, totalMB);\n                /* video->numVopsInGOP=0; 3/13/01 move it to vop.c*/\n\n              
  return ;\n            }\n        }\n        /******** no scene change, continue motion search **********************/\n        start_i = 0;\n        type_pred++; /* second pass */\n    }\n\n    video->sumMAD = (float)totalSAD / (float)NumPixelMB;    /* avg SAD */\n\n    /* find f_code , 10/27/2000 */\n    f_code_p = 1;\n    while ((max_mag >> (4 + f_code_p)) > 0)\n        f_code_p++;\n\n    f_code_n = 1;\n    min_mag *= -1;\n    while ((min_mag - 1) >> (4 + f_code_n) > 0)\n        f_code_n++;\n\n    currVop->fcodeForward = (f_code_p > f_code_n ? f_code_p : f_code_n);\n\n#ifdef HTFM\n    /***** HYPOTHESIS TESTING ********/  /* 2/28/01 */\n    if (collect)\n    {\n        collect = 0;\n        UpdateHTFM(video, newvar, exp_lamda, &htfm_stat);\n    }\n    /*********************************/\n#endif\n\n    return ;\n}\n\n\n#ifdef HTFM\nvoid InitHTFM(VideoEncData *video, HTFM_Stat *htfm_stat, double *newvar, Int *collect)\n{\n    Int i;\n    Int lx = video->currVop->width; //  padding\n    Int lx2 = lx << 1;\n    Int lx3 = lx2 + lx;\n    Int rx = video->currVop->pitch;\n    Int rx2 = rx << 1;\n    Int rx3 = rx2 + rx;\n\n    Int *offset, *offset2;\n\n    /* 4/11/01, collect data every 30 frames, doesn't have to be base layer */\n    if (((Int)video->numVopsInGOP) % 30 == 1)\n    {\n\n        *collect = 1;\n\n        htfm_stat->countbreak = 0;\n        htfm_stat->abs_dif_mad_avg = 0;\n\n        for (i = 0; i < 16; i++)\n        {\n            newvar[i] = 0.0;\n        }\n//      video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM_Collect;\n        video->functionPointer->SAD_Macroblock = &SAD_MB_HTFM_Collect;\n        video->functionPointer->SAD_MB_HalfPel[0] = NULL;\n        video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFM_Collectxh;\n        video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFM_Collectyh;\n        video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFM_Collectxhyh;\n        video->sad_extra_info = (void*)(htfm_stat);\n   
     offset = htfm_stat->offsetArray;\n        offset2 = htfm_stat->offsetRef;\n    }\n    else\n    {\n//      video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING_HTFM;\n        video->functionPointer->SAD_Macroblock = &SAD_MB_HTFM;\n        video->functionPointer->SAD_MB_HalfPel[0] = NULL;\n        video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HP_HTFMxh;\n        video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HP_HTFMyh;\n        video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HP_HTFMxhyh;\n        video->sad_extra_info = (void*)(video->nrmlz_th);\n        offset = video->nrmlz_th + 16;\n        offset2 = video->nrmlz_th + 32;\n    }\n\n    offset[0] = 0;\n    offset[1] = lx2 + 2;\n    offset[2] = 2;\n    offset[3] = lx2;\n    offset[4] = lx + 1;\n    offset[5] = lx3 + 3;\n    offset[6] = lx + 3;\n    offset[7] = lx3 + 1;\n    offset[8] = lx;\n    offset[9] = lx3 + 2;\n    offset[10] = lx3 ;\n    offset[11] = lx + 2 ;\n    offset[12] = 1;\n    offset[13] = lx2 + 3;\n    offset[14] = lx2 + 1;\n    offset[15] = 3;\n\n    offset2[0] = 0;\n    offset2[1] = rx2 + 2;\n    offset2[2] = 2;\n    offset2[3] = rx2;\n    offset2[4] = rx + 1;\n    offset2[5] = rx3 + 3;\n    offset2[6] = rx + 3;\n    offset2[7] = rx3 + 1;\n    offset2[8] = rx;\n    offset2[9] = rx3 + 2;\n    offset2[10] = rx3 ;\n    offset2[11] = rx + 2 ;\n    offset2[12] = 1;\n    offset2[13] = rx2 + 3;\n    offset2[14] = rx2 + 1;\n    offset2[15] = 3;\n\n    return ;\n}\n\nvoid UpdateHTFM(VideoEncData *video, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat)\n{\n    if (htfm_stat->countbreak == 0)\n        htfm_stat->countbreak = 1;\n\n    newvar[0] = (double)(htfm_stat->abs_dif_mad_avg) / (htfm_stat->countbreak * 16.);\n\n    if (newvar[0] < 0.001)\n    {\n        newvar[0] = 0.001; /* to prevent floating overflow */\n    }\n    exp_lamda[0] =  1 / (newvar[0] * 1.4142136);\n    exp_lamda[1] = exp_lamda[0] * 1.5825;\n    exp_lamda[2] = exp_lamda[0] * 2.1750;\n    exp_lamda[3] = 
exp_lamda[0] * 3.5065;\n    exp_lamda[4] = exp_lamda[0] * 3.1436;\n    exp_lamda[5] = exp_lamda[0] * 3.5315;\n    exp_lamda[6] = exp_lamda[0] * 3.7449;\n    exp_lamda[7] = exp_lamda[0] * 4.5854;\n    exp_lamda[8] = exp_lamda[0] * 4.6191;\n    exp_lamda[9] = exp_lamda[0] * 5.4041;\n    exp_lamda[10] = exp_lamda[0] * 6.5974;\n    exp_lamda[11] = exp_lamda[0] * 10.5341;\n    exp_lamda[12] = exp_lamda[0] * 10.0719;\n    exp_lamda[13] = exp_lamda[0] * 12.0516;\n    exp_lamda[14] = exp_lamda[0] * 15.4552;\n\n    CalcThreshold(HTFM_Pf, exp_lamda, video->nrmlz_th);\n    return ;\n}\n\n\nvoid CalcThreshold(double pf, double exp_lamda[], Int nrmlz_th[])\n{\n    Int i;\n    double temp[15];\n    //  printf(\"\\nLamda: \");\n\n    /* parametric PREMODELling */\n    for (i = 0; i < 15; i++)\n    {\n        //    printf(\"%g \",exp_lamda[i]);\n        if (pf < 0.5)\n            temp[i] = 1 / exp_lamda[i] * M4VENC_LOG(2 * pf);\n        else\n            temp[i] = -1 / exp_lamda[i] * M4VENC_LOG(2 * (1 - pf));\n    }\n\n    nrmlz_th[15] = 0;\n    for (i = 0; i < 15; i++)        /* scale upto no.pixels */\n        nrmlz_th[i] = (Int)(temp[i] * ((i + 1) << 4) + 0.5);\n\n    return ;\n}\n\nvoid    HTFMPrepareCurMB(VideoEncData *video, HTFM_Stat *htfm_stat, UChar *cur)\n{\n    void* tmp = (void*)(video->currYMB);\n    ULong *htfmMB = (ULong*)tmp;\n    UChar *ptr, byte;\n    Int *offset;\n    Int i;\n    ULong word;\n    Int width = video->currVop->width;\n\n    if (((Int)video->numVopsInGOP) % 30 == 1)\n    {\n        offset = htfm_stat->offsetArray;\n    }\n    else\n    {\n        offset = video->nrmlz_th + 16;\n    }\n\n    for (i = 0; i < 16; i++)\n    {\n        ptr = cur + offset[i];\n        word = ptr[0];\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (width << 2));\n        byte = ptr[4];\n        word |= 
(byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (width << 2));\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n\n        word = *(ptr += (width << 2));\n        byte = ptr[4];\n        word |= (byte << 8);\n        byte = ptr[8];\n        word |= (byte << 16);\n        byte = ptr[12];\n        word |= (byte << 24);\n        *htfmMB++ = word;\n    }\n\n    return ;\n}\n\n\n#endif\n\nvoid    PrepareCurMB(VideoEncData *video, UChar *cur)\n{\n    void* tmp = (void*)(video->currYMB);\n    ULong *currYMB = (ULong*)tmp;\n    Int i;\n    Int width = video->currVop->width;\n\n    cur -= width;\n\n    for (i = 0; i < 16; i++)\n    {\n        *currYMB++ = *((ULong*)(cur += width));\n        *currYMB++ = *((ULong*)(cur + 4));\n        *currYMB++ = *((ULong*)(cur + 8));\n        *currYMB++ = *((ULong*)(cur + 12));\n    }\n\n    return ;\n}\n\n\n/*==================================================================\n    Function:   MBMotionSearch\n    Date:       09/06/2000\n    Purpose:    Perform motion estimation for a macroblock.\n                Find 1MV and 4MVs in half-pels resolutions.\n                Using ST1 algorithm provided by Chalidabhongse and Kuo\n                CSVT March'98.\n\n==================================================================*/\n\nvoid MBMotionSearch(VideoEncData *video, UChar *cur, UChar *best_cand[],\n                    Int i0, Int j0, Int type_pred, Int FS_en, Int *hp_guess)\n{\n    Vol *currVol = video->vol[video->currLayer];\n    UChar *ref, *cand, *ncand = NULL, *cur8;\n    void *extra_info = video->sad_extra_info;\n    Int mbnum = video->mbnum;\n    Int width = video->currVop->width; /* 6/12/01, must be multiple of 16 */\n    Int height = video->currVop->height;\n  
  MOT **mot = video->mot;\n    UChar use_4mv = video->encParams->MV8x8_Enabled;\n    UChar h263_mode = video->encParams->H263_Enabled;\n    Int(*SAD_Macroblock)(UChar*, UChar*, Int, void*) = video->functionPointer->SAD_Macroblock;\n    Int(*SAD_Block)(UChar*, UChar*, Int, Int, void*) = video->functionPointer->SAD_Block;\n    VideoEncParams *encParams = video->encParams;\n    Int range = encParams->SearchRange;\n\n    Int lx = video->currVop->pitch; /* padding */\n    Int comp;\n    Int i, j, imin, jmin, ilow, ihigh, jlow, jhigh, iorg, jorg;\n    Int d, dmin, dn[9];\n#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */\n    Int d0;\n#endif\n    Int k;\n    Int mvx[5], mvy[5], imin0, jmin0;\n    Int num_can, center_again;\n    Int last_loc, new_loc = 0;\n    Int step, max_step = range >> 1;\n    Int next;\n\n    ref = video->forwardRefVop->yChan; /* origin of actual frame */\n\n    cur = video->currYMB; /* use smaller memory space for current MB */\n\n    /*  find limit of the search (adjusting search range)*/\n\n    if (!h263_mode)\n    {\n        ilow = i0 - range;\n        if (ilow < -15)\n            ilow = -15;\n        ihigh = i0 + range - 1;\n        if (ihigh > width - 1)\n            ihigh = width - 1;\n        jlow = j0 - range;\n        if (jlow < -15)\n            jlow = -15;\n        jhigh = j0 + range - 1;\n        if (jhigh > height - 1)\n            jhigh = height - 1;\n    }\n    else\n    {\n        ilow = i0 - range;\n        if (ilow < 0)\n            ilow = 0;\n        ihigh = i0 + range - 1;\n        if (ihigh > width - 16)\n            ihigh = width - 16;\n        jlow = j0 - range;\n        if (jlow < 0)\n            jlow = 0;\n        jhigh = j0 + range - 1;\n        if (jhigh > height - 16)\n            jhigh = height - 16;\n    }\n\n    imin = i0;\n    jmin = j0; /* needed for fullsearch */\n    ncand = ref + imin + jmin * lx;\n\n    /* for first row of MB, fullsearch can be used */\n    if (FS_en)\n    {\n        *hp_guess = 0; /* no 
guess for fast half-pel */\n\n        dmin =  fullsearch(video, currVol, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh);\n\n        ncand = ref + imin + jmin * lx;\n\n        mot[mbnum][0].sad = dmin;\n        mot[mbnum][0].x = (imin - i0) << 1;\n        mot[mbnum][0].y = (jmin - j0) << 1;\n        imin0 = imin << 1;  /* 16x16 MV in half-pel resolution */\n        jmin0 = jmin << 1;\n        best_cand[0] = ncand;\n    }\n    else\n    {   /* 4/7/01, modified this testing for fullsearch the top row to only upto (0,3) MB */\n        /*            upto 30% complexity saving with the same complexity */\n        if (video->forwardRefVop->predictionType == I_VOP && j0 == 0 && i0 <= 64 && type_pred != 1)\n        {\n            *hp_guess = 0; /* no guess for fast half-pel */\n            dmin =  fullsearch(video, currVol, ref, cur, &imin, &jmin, ilow, ihigh, jlow, jhigh);\n            ncand = ref + imin + jmin * lx;\n        }\n        else\n        {\n            /************** initialize candidate **************************/\n            /* find initial motion vector */\n            CandidateSelection(mvx, mvy, &num_can, i0 >> 4, j0 >> 4, video, type_pred);\n\n            dmin = 65535;\n\n            /* check if all are equal */\n            if (num_can == ALL_CAND_EQUAL)\n            {\n                i = i0 + mvx[0];\n                j = j0 + mvy[0];\n\n                if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                {\n                    cand = ref + i + j * lx;\n\n                    d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);\n\n                    if (d < dmin)\n                    {\n                        dmin = d;\n                        imin = i;\n                        jmin = j;\n                        ncand = cand;\n                    }\n                }\n            }\n            else\n            {\n                /************** evaluate unique candidates **********************/\n                
for (k = 0; k < num_can; k++)\n                {\n                    i = i0 + mvx[k];\n                    j = j0 + mvy[k];\n\n                    if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                    {\n                        cand = ref + i + j * lx;\n                        d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);\n\n                        if (d < dmin)\n                        {\n                            dmin = d;\n                            imin = i;\n                            jmin = j;\n                            ncand = cand;\n                        }\n                        else if ((d == dmin) && PV_ABS(mvx[k]) + PV_ABS(mvy[k]) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))\n                        {\n                            dmin = d;\n                            imin = i;\n                            jmin = j;\n                            ncand = cand;\n                        }\n                    }\n                }\n            }\n            if (num_can == 0 || dmin == 65535) /* no candidate selected */\n            {\n                ncand = ref + i0 + j0 * lx; /* use (0,0) MV as initial value */\n                mot[mbnum][7].sad = dmin = (*SAD_Macroblock)(ncand, cur, (65535 << 16) | lx, extra_info);\n#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */\n                d0 = dmin;\n#endif\n                imin = i0;\n                jmin = j0;\n            }\n\n#if (ZERO_MV_PREF==0)  /*  COMPUTE ZERO VECTOR FIRST !!!!!*/\n            dmin -= PREF_NULL_VEC;\n#endif\n\n            /******************* local refinement ***************************/\n            center_again = 0;\n            last_loc = new_loc = 0;\n            //          ncand = ref + jmin*lx + imin;  /* center of the search */\n            step = 0;\n            dn[0] = dmin;\n            while (!center_again && step <= max_step)\n            {\n\n                MoveNeighborSAD(dn, last_loc);\n\n                
center_again = 1;\n                i = imin;\n                j = jmin - 1;\n                cand = ref + i + j * lx;\n\n                /*  starting from [0,-1] */\n                /* spiral check one step at a time*/\n                for (k = 2; k <= 8; k += 2)\n                {\n                    if (!tab_exclude[last_loc][k]) /* exclude last step computation */\n                    {       /* not already computed */\n                        if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                        {\n                            d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, extra_info);\n                            dn[k] = d; /* keep it for half pel use */\n\n                            if (d < dmin)\n                            {\n                                ncand = cand;\n                                dmin = d;\n                                imin = i;\n                                jmin = j;\n                                center_again = 0;\n                                new_loc = k;\n                            }\n                            else if ((d == dmin) && PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))\n                            {\n                                ncand = cand;\n                                imin = i;\n                                jmin = j;\n                                center_again = 0;\n                                new_loc = k;\n                            }\n                        }\n                    }\n                    if (k == 8)  /* end side search*/\n                    {\n                        if (!center_again)\n                        {\n                            k = -1; /* start diagonal search */\n                            cand -= lx;\n                            j--;\n                        }\n                    }\n                    else\n                    {\n                        next = refine_next[k][0];\n            
            i += next;\n                        cand += next;\n                        next = refine_next[k][1];\n                        j += next;\n                        cand += lx * next;\n                    }\n                }\n                last_loc = new_loc;\n                step ++;\n            }\n            if (!center_again)\n                MoveNeighborSAD(dn, last_loc);\n\n            *hp_guess = FindMin(dn);\n\n        }\n\n#if (ZERO_MV_PREF==1)   /* compute (0,0) MV at the end */\n        if (d0 - PREF_NULL_VEC < dmin)\n        {\n            ncand = ref + i0 + j0 * lx;\n            dmin = d0;\n            imin = i0;\n            jmin = j0;\n        }\n#endif\n        mot[mbnum][0].sad = dmin;\n        mot[mbnum][0].x = (imin - i0) << 1;\n        mot[mbnum][0].y = (jmin - j0) << 1;\n        imin0 = imin << 1;  /* 16x16 MV in half-pel resolution */\n        jmin0 = jmin << 1;\n        best_cand[0] = ncand;\n    }\n    /* imin and jmin is the best 1 MV */\n#ifndef NO_INTER4V\n    /*******************  Find 4 motion vectors ****************************/\n    if (use_4mv && !h263_mode)\n    {\n#ifdef _SAD_STAT\n        num_Blk += 4;\n#endif\n        /* starting from the best 1MV */\n        //offset = imin + jmin*lx;\n        iorg = i0;\n        jorg = j0;\n\n        for (comp = 0; comp < 4; comp++)\n        {\n            i0 = iorg + ((comp & 1) << 3);\n            j0 = jorg + ((comp & 2) << 2);\n\n            imin = (imin0 >> 1) + ((comp & 1) << 3);    /* starting point from 16x16 MV */\n            jmin = (jmin0 >> 1) + ((comp & 2) << 2);\n            ncand = ref + imin + jmin * lx;\n\n            cur8 = cur + ((comp & 1) << 3) + (((comp & 2) << 2) << 4) ; /* 11/30/05, smaller cache */\n\n            /*  find limit of the search (adjusting search range)*/\n            ilow = i0 - range;\n            ihigh = i0 + range - 1 ;/* 4/9/01 */\n            if (ilow < -15)\n                ilow = -15;\n            if (ihigh > width - 1)\n                
ihigh = width - 1;\n            jlow = j0 - range;\n            jhigh = j0 + range - 1 ;/* 4/9/01 */\n            if (jlow < -15)\n                jlow = -15;\n            if (jhigh > height - 1)\n                jhigh = height - 1;\n\n            SAD_Block = video->functionPointer->SAD_Block;\n\n            if (FS_en)  /* fullsearch enable, center around 16x16 MV */\n            {\n                dmin =  fullsearchBlk(video, currVol, ncand, cur8, &imin, &jmin, ilow, ihigh, jlow, jhigh, range);\n                ncand = ref + imin + jmin * lx;\n\n                mot[mbnum][comp+1].sad = dmin;\n                mot[mbnum][comp+1].x = (imin - i0) << 1;\n                mot[mbnum][comp+1].y = (jmin - j0) << 1;\n                best_cand[comp+1] = ncand;\n            }\n            else    /* no fullsearch, do local search */\n            {\n                /* starting point from 16x16 */\n                dmin = (*SAD_Block)(ncand, cur8, 65536, lx, extra_info);\n\n                /******************* local refinement ***************************/\n                center_again = 0;\n                last_loc = 0;\n\n                while (!center_again)\n                {\n                    center_again = 1;\n                    i = imin;\n                    j = jmin - 1;\n                    cand = ref + i + j * lx;\n\n                    /*  starting from [0,-1] */\n                    /* spiral check one step at a time*/\n                    for (k = 2; k <= 8; k += 2)\n                    {\n                        if (!tab_exclude[last_loc][k]) /* exclude last step computation */\n                        {       /* not already computed */\n                            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n                            {\n                                d = (*SAD_Block)(cand, cur8, dmin, lx, extra_info);\n\n                                if (d < dmin)\n                                {\n                                    ncand = 
cand;\n                                    dmin = d;\n                                    imin = i;\n                                    jmin = j;\n                                    center_again = 0;\n                                    new_loc = k;\n                                }\n                                else if ((d == dmin) &&\n                                         PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - imin) + PV_ABS(j0 - jmin))\n                                {\n                                    ncand = cand;\n                                    imin = i;\n                                    jmin = j;\n                                    center_again = 0;\n                                    new_loc = k;\n                                }\n                            }\n                        }\n                        if (k == 8)  /* end side search*/\n                        {\n                            if (!center_again)\n                            {\n                                k = -1; /* start diagonal search */\n                                if (j <= height - 1 && j > 0)   cand -= lx;\n                                j--;\n                            }\n                        }\n                        else\n                        {\n                            next = refine_next[k][0];\n                            cand += next;\n                            i += next;\n                            next = refine_next[k][1];\n                            cand += lx * next;\n                            j += next;\n                        }\n                    }\n                    last_loc = new_loc;\n                }\n                mot[mbnum][comp+1].sad = dmin;\n                mot[mbnum][comp+1].x = (imin - i0) << 1;\n                mot[mbnum][comp+1].y = (jmin - j0) << 1;\n                best_cand[comp+1] = ncand;\n            }\n            /********************************************/\n        }\n    }\n    
else\n#endif  /* NO_INTER4V */\n    {\n        mot[mbnum][1].sad = mot[mbnum][2].sad = mot[mbnum][3].sad = mot[mbnum][4].sad = (dmin + 2) >> 2;\n        mot[mbnum][1].x = mot[mbnum][2].x = mot[mbnum][3].x = mot[mbnum][4].x = mot[mbnum][0].x;\n        mot[mbnum][1].y = mot[mbnum][2].y = mot[mbnum][3].y = mot[mbnum][4].y = mot[mbnum][0].y;\n        best_cand[1] = best_cand[2] = best_cand[3] = best_cand[4] = ncand;\n\n    }\n    return ;\n}\n\n\n/*===============================================================================\n    Function:   fullsearch\n    Date:       09/16/2000\n    Purpose:    Perform full-search motion estimation over the range of search\n                region in a spiral-outward manner.\n    Input/Output:   VideoEncData, current Vol, previou Vop, pointer to the left corner of\n                current VOP, current coord (also output), boundaries.\n===============================================================================*/\n\nInt fullsearch(VideoEncData *video, Vol *currVol, UChar *prev, UChar *cur,\n               Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh)\n{\n    Int range = video->encParams->SearchRange;\n    UChar *cand;\n    Int i, j, k, l;\n    Int d, dmin;\n    Int i0 = *imin; /* current position */\n    Int j0 = *jmin;\n    Int(*SAD_Macroblock)(UChar*, UChar*, Int, void*) = video->functionPointer->SAD_Macroblock;\n    void *extra_info = video->sad_extra_info;\n//  UChar h263_mode = video->encParams->H263_Enabled;\n    Int lx = video->currVop->pitch; /* with padding */\n\n    Int offset = i0 + j0 * lx;\n\n    OSCL_UNUSED_ARG(currVol);\n\n    cand = prev + offset;\n\n    dmin  = (*SAD_Macroblock)(cand, cur, (65535 << 16) | lx, (void*)extra_info) - PREF_NULL_VEC;\n\n    /* perform spiral search */\n    for (k = 1; k <= range; k++)\n    {\n\n        i = i0 - k;\n        j = j0 - k;\n\n        cand = prev + i + j * lx;\n\n        for (l = 0; l < 8*k; l++)\n        {\n            /* no need for boundary checking again 
*/\n            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n            {\n                d = (*SAD_Macroblock)(cand, cur, (dmin << 16) | lx, (void*)extra_info);\n\n                if (d < dmin)\n                {\n                    dmin = d;\n                    *imin = i;\n                    *jmin = j;\n                }\n                else if ((d == dmin) && PV_ABS(i0 - i) + PV_ABS(j0 - j) < PV_ABS(i0 - *imin) + PV_ABS(j0 - *jmin))\n                {\n                    dmin = d;\n                    *imin = i;\n                    *jmin = j;\n                }\n            }\n\n            if (l < (k << 1))\n            {\n                i++;\n                cand++;\n            }\n            else if (l < (k << 2))\n            {\n                j++;\n                cand += lx;\n            }\n            else if (l < ((k << 2) + (k << 1)))\n            {\n                i--;\n                cand--;\n            }\n            else\n            {\n                j--;\n                cand -= lx;\n            }\n        }\n    }\n\n    return dmin;\n}\n\n#ifndef NO_INTER4V\n/*===============================================================================\n    Function:   fullsearchBlk\n    Date:       01/9/2001\n    Purpose:    Perform full-search motion estimation of an 8x8 block over the range\n                of search region in a spiral-outward manner centered at the 16x16 MV.\n    Input/Output:   VideoEncData, MB coordinate, pointer to the initial MV on the\n                reference, pointer to coor of current block, search range.\n===============================================================================*/\nInt fullsearchBlk(VideoEncData *video, Vol *currVol, UChar *cent, UChar *cur,\n                  Int *imin, Int *jmin, Int ilow, Int ihigh, Int jlow, Int jhigh, Int range)\n{\n    UChar *cand, *ref;\n    Int i, j, k, l, istart, jstart;\n    Int d, dmin;\n    Int lx = video->currVop->pitch; /* with padding */\n    
Int(*SAD_Block)(UChar*, UChar*, Int, Int, void*) = video->functionPointer->SAD_Block;\n    void *extra_info = video->sad_extra_info;\n\n    OSCL_UNUSED_ARG(currVol);\n\n    /* starting point centered at 16x16 MV */\n    ref = cent;\n    istart = *imin;\n    jstart = *jmin;\n\n    dmin = (*SAD_Block)(ref, cur, 65536, lx, (void*)extra_info);\n\n    cand = ref;\n    /* perform spiral search */\n    for (k = 1; k <= range; k++)\n    {\n\n        i = istart - k;\n        j = jstart - k;\n        cand -= (lx + 1);  /* candidate region */\n\n        for (l = 0; l < 8*k; l++)\n        {\n            /* no need for boundary checking again */\n            if (i >= ilow && i <= ihigh && j >= jlow && j <= jhigh)\n            {\n                d = (*SAD_Block)(cand, cur, dmin, lx, (void*)extra_info);\n\n                if (d < dmin)\n                {\n                    dmin = d;\n                    *imin = i;\n                    *jmin = j;\n                }\n                else if ((d == dmin) &&\n                         PV_ABS(istart - i) + PV_ABS(jstart - j) < PV_ABS(istart - *imin) + PV_ABS(jstart - *jmin))\n                {\n                    dmin = d;\n                    *imin = i;\n                    *jmin = j;\n                }\n            }\n\n            if (l < (k << 1))\n            {\n                i++;\n                cand++;\n            }\n            else if (l < (k << 2))\n            {\n                j++;\n                cand += lx;\n            }\n            else if (l < ((k << 2) + (k << 1)))\n            {\n                i--;\n                cand--;\n            }\n            else\n            {\n                j--;\n                cand -= lx;\n            }\n        }\n    }\n\n    return dmin;\n}\n#endif /* NO_INTER4V */\n\n/*===============================================================================\n    Function:   CandidateSelection\n    Date:       09/16/2000\n    Purpose:    Fill up the list of candidate using 
spatio-temporal correlation\n                among neighboring blocks.\n    Input/Output:   type_pred = 0: first pass, 1: second pass, or no SCD\n    Modified:    09/23/01, get rid of redundant candidates before passing back.\n===============================================================================*/\n\nvoid CandidateSelection(Int *mvx, Int *mvy, Int *num_can, Int imb, Int jmb,\n                        VideoEncData *video, Int type_pred)\n{\n    MOT **mot = video->mot;\n    MOT *pmot;\n    Int mbnum = video->mbnum;\n    Vol *currVol = video->vol[video->currLayer];\n    Int mbwidth = currVol->nMBPerRow;\n    Int mbheight = currVol->nMBPerCol;\n    Int i, j, same, num1;\n\n    *num_can = 0;\n\n    if (video->forwardRefVop->predictionType == P_VOP)\n    {\n        /* Spatio-Temporal Candidate (five candidates) */\n        if (type_pred == 0) /* first pass */\n        {\n            pmot = &mot[mbnum][0]; /* same coordinate previous frame */\n            mvx[(*num_can)] = (pmot->x) >> 1;\n            mvy[(*num_can)++] = (pmot->y) >> 1;\n            if (imb >= (mbwidth >> 1) && imb > 0)  /*left neighbor previous frame */\n            {\n                pmot = &mot[mbnum-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            else if (imb + 1 < mbwidth)   /*right neighbor previous frame */\n            {\n                pmot = &mot[mbnum+1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n\n            if (jmb < mbheight - 1)  /*bottom neighbor previous frame */\n            {\n                pmot = &mot[mbnum+mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            else if (jmb > 0)   /*upper neighbor previous frame */\n            {\n                pmot = &mot[mbnum-mbwidth][0];\n                mvx[(*num_can)] = 
(pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n\n            if (imb > 0 && jmb > 0)  /* upper-left neighbor current frame*/\n            {\n                pmot = &mot[mbnum-mbwidth-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb > 0 && imb < mbheight - 1)  /* upper right neighbor current frame*/\n            {\n                pmot = &mot[mbnum-mbwidth+1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n        }\n        else    /* second pass */\n            /* original ST1 algorithm */\n        {\n            pmot = &mot[mbnum][0]; /* same coordinate previous frame */\n            mvx[(*num_can)] = (pmot->x) >> 1;\n            mvy[(*num_can)++] = (pmot->y) >> 1;\n\n            if (imb > 0)  /*left neighbor current frame */\n            {\n                pmot = &mot[mbnum-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb > 0)  /*upper neighbor current frame */\n            {\n                pmot = &mot[mbnum-mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (imb < mbwidth - 1)  /*right neighbor previous frame */\n            {\n                pmot = &mot[mbnum+1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb < mbheight - 1)  /*bottom neighbor previous frame */\n            {\n                pmot = &mot[mbnum+mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n        }\n    }\n    else  /* only Spatial Candidate (four candidates)*/\n    {\n        if (type_pred == 0) /*first 
pass*/\n        {\n            if (imb > 1)  /* neighbor two blocks away to the left */\n            {\n                pmot = &mot[mbnum-2][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (imb > 0 && jmb > 0)  /* upper-left neighbor */\n            {\n                pmot = &mot[mbnum-mbwidth-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb > 0 && imb < mbheight - 1)  /* upper right neighbor */\n            {\n                pmot = &mot[mbnum-mbwidth+1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n        }\n//#ifdef SCENE_CHANGE_DETECTION\n        /* second pass (ST2 algorithm)*/\n        else if (type_pred == 1) /* 4/7/01 */\n        {\n            if (imb > 0)  /*left neighbor current frame */\n            {\n                pmot = &mot[mbnum-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb > 0)  /*upper neighbor current frame */\n            {\n                pmot = &mot[mbnum-mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (imb < mbwidth - 1)  /*right neighbor current frame */\n            {\n                pmot = &mot[mbnum+1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n            if (jmb < mbheight - 1)  /*bottom neighbor current frame */\n            {\n                pmot = &mot[mbnum+mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n            }\n        }\n//#else\n        else /* original ST1 algorithm */\n        {\n            if (imb > 0)  
/*left neighbor current frame */\n            {\n                pmot = &mot[mbnum-1][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n\n                if (jmb > 0)  /*upper-left neighbor current frame */\n                {\n                    pmot = &mot[mbnum-mbwidth-1][0];\n                    mvx[(*num_can)] = (pmot->x) >> 1;\n                    mvy[(*num_can)++] = (pmot->y) >> 1;\n                }\n\n            }\n            if (jmb > 0)  /*upper neighbor current frame */\n            {\n                pmot = &mot[mbnum-mbwidth][0];\n                mvx[(*num_can)] = (pmot->x) >> 1;\n                mvy[(*num_can)++] = (pmot->y) >> 1;\n\n                if (imb < mbheight - 1)  /*upper-right neighbor current frame */\n                {\n                    pmot = &mot[mbnum-mbwidth+1][0];\n                    mvx[(*num_can)] = (pmot->x) >> 1;\n                    mvy[(*num_can)++] = (pmot->y) >> 1;\n                }\n            }\n        }\n//#endif\n    }\n\n    /* 3/23/01, remove redundant candidate (possible k-mean) */\n    num1 = *num_can;\n    *num_can = 1;\n    for (i = 1; i < num1; i++)\n    {\n        same = 0;\n        j = 0;\n        while (!same && j < *num_can)\n        {\n#if (CANDIDATE_DISTANCE==0)\n            if (mvx[i] == mvx[j] && mvy[i] == mvy[j])\n#else\n            // modified k-mean, 3/24/01, shouldn't be greater than 3\n            if (PV_ABS(mvx[i] - mvx[j]) + PV_ABS(mvy[i] - mvy[j]) < CANDIDATE_DISTANCE)\n#endif\n                same = 1;\n            j++;\n        }\n        if (!same)\n        {\n            mvx[*num_can] = mvx[i];\n            mvy[*num_can] = mvy[i];\n            (*num_can)++;\n        }\n    }\n\n#ifdef _SAD_STAT\n    num_cand += (*num_can);\n#endif\n\n    if (num1 == 5 && *num_can == 1)\n        *num_can = ALL_CAND_EQUAL; /* all are equal */\n\n    return ;\n}\n\n/*===========================================================================\n    
Function:   RasterIntraUpdate\n    Date:       2/26/01\n    Purpose:    To raster-scan assign INTRA-update .\n                N macroblocks are updated (also was programmable).\n===========================================================================*/\nvoid RasterIntraUpdate(UChar *intraArray, UChar *Mode, Int totalMB, Int numRefresh)\n{\n    Int indx, i;\n\n    /* find the last refresh MB */\n    indx = 0;\n    while (intraArray[indx] == 1 && indx < totalMB)\n        indx++;\n\n    /* add more  */\n    for (i = 0; i < numRefresh && indx < totalMB; i++)\n    {\n        Mode[indx] = MODE_INTRA;\n        intraArray[indx++] = 1;\n    }\n\n    /* if read the end of frame, reset and loop around */\n    if (indx >= totalMB - 1)\n    {\n        ResetIntraUpdate(intraArray, totalMB);\n        indx = 0;\n        while (i < numRefresh && indx < totalMB)\n        {\n            intraArray[indx] = 1;\n            Mode[indx++] = MODE_INTRA;\n            i++;\n        }\n    }\n\n    return ;\n}\n\n/*===========================================================================\n    Function:   ResetIntraUpdate\n    Date:       11/28/00\n    Purpose:    Reset already intra updated flags to all zero\n===========================================================================*/\n\nvoid ResetIntraUpdate(UChar *intraArray, Int totalMB)\n{\n    M4VENC_MEMSET(intraArray, 0, sizeof(UChar)*totalMB);\n    return ;\n}\n\n/*===========================================================================\n    Function:   ResetIntraUpdateRegion\n    Date:       12/1/00\n    Purpose:    Reset already intra updated flags in one region to all zero\n===========================================================================*/\nvoid ResetIntraUpdateRegion(UChar *intraArray, Int start_i, Int rwidth,\n                            Int start_j, Int rheight, Int mbwidth, Int mbheight)\n{\n    Int indx, j;\n\n    if (start_i + rwidth >= mbwidth)\n        rwidth = mbwidth - start_i;\n    if (start_j + 
rheight >= mbheight)\n        rheight = mbheight - start_j;\n\n    for (j = start_j; j < start_j + rheight; j++)\n    {\n        indx = j * mbwidth;\n        M4VENC_MEMSET(intraArray + indx + start_i, 0, sizeof(UChar)*rwidth);\n    }\n\n    return ;\n}\n\n/*************************************************************\n    Function:   MoveNeighborSAD\n    Date:       3/27/01\n    Purpose:    Move neighboring SAD around when center has shifted\n*************************************************************/\n\nvoid MoveNeighborSAD(Int dn[], Int new_loc)\n{\n    Int tmp[9];\n    tmp[0] = dn[0];\n    tmp[1] = dn[1];\n    tmp[2] = dn[2];\n    tmp[3] = dn[3];\n    tmp[4] = dn[4];\n    tmp[5] = dn[5];\n    tmp[6] = dn[6];\n    tmp[7] = dn[7];\n    tmp[8] = dn[8];\n    dn[0] = dn[1] = dn[2] = dn[3] = dn[4] = dn[5] = dn[6] = dn[7] = dn[8] = 65536;\n\n    switch (new_loc)\n    {\n        case 0:\n            break;\n        case 1:\n            dn[4] = tmp[2];\n            dn[5] = tmp[0];\n            dn[6] = tmp[8];\n            break;\n        case 2:\n            dn[4] = tmp[3];\n            dn[5] = tmp[4];\n            dn[6] = tmp[0];\n            dn[7] = tmp[8];\n            dn[8] = tmp[1];\n            break;\n        case 3:\n            dn[6] = tmp[4];\n            dn[7] = tmp[0];\n            dn[8] = tmp[2];\n            break;\n        case 4:\n            dn[1] = tmp[2];\n            dn[2] = tmp[3];\n            dn[6] = tmp[5];\n            dn[7] = tmp[6];\n            dn[8] = tmp[0];\n            break;\n        case 5:\n            dn[1] = tmp[0];\n            dn[2] = tmp[4];\n            dn[8] = tmp[6];\n            break;\n        case 6:\n            dn[1] = tmp[8];\n            dn[2] = tmp[0];\n            dn[3] = tmp[4];\n            dn[4] = tmp[5];\n            dn[8] = tmp[7];\n            break;\n        case 7:\n            dn[2] = tmp[8];\n            dn[3] = tmp[0];\n            dn[4] = tmp[6];\n            break;\n        case 8:\n            dn[2] = 
tmp[1];\n            dn[3] = tmp[2];\n            dn[4] = tmp[0];\n            dn[5] = tmp[6];\n            dn[6] = tmp[7];\n            break;\n    }\n    dn[0] = tmp[new_loc];\n\n    return ;\n}\n\n/* 3/28/01, find minimal of dn[9] */\n\nInt FindMin(Int dn[])\n{\n    Int min, i;\n    Int dmin;\n\n    dmin = dn[1];\n    min = 1;\n    for (i = 2; i < 9; i++)\n    {\n        if (dn[i] < dmin)\n        {\n            dmin = dn[i];\n            min = i;\n        }\n    }\n\n    return min;\n}\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/mp4def.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _PVDECDEF_H_\n#define _PVDECDEF_H_\n\n/********** platform dependent in-line assembly *****************************/\n\n/*************** Intel *****************/\n\n/*************** ARM *****************/\n/* for general ARM instruction. 
#define __ARM has to be defined in compiler set up.*/\n/* for DSP MUL */\n#ifdef __TARGET_FEATURE_DSPMUL\n#define _ARM_DSP_MUL\n#endif\n\n/* for Count Leading Zero instruction */\n#ifdef __TARGET_ARCH_5T\n#define _ARM_CLZ\n#endif\n#ifdef __TARGET_ARCH_5TE\n#define _ARM_CLZ\n#endif\n/****************************************************************************/\n\n#ifndef _PV_TYPES_\n#define _PV_TYPES_\ntypedef unsigned char UChar;\ntypedef char Char;\ntypedef unsigned int UInt;\ntypedef int Int;\ntypedef unsigned short UShort;\ntypedef short Short;\ntypedef short int SInt;\ntypedef unsigned int Bool;\ntypedef unsigned long   ULong;\ntypedef void Void;\n\n#define PV_CODEC_INIT       0\n#define PV_CODEC_STOP       1\n#define PV_CODEC_RUNNING    2\n#define PV_CODEC_RESET      3\n#endif\n\ntypedef enum\n{\n    PV_SUCCESS,\n    PV_FAIL,\n    PV_EOS,             /* hit End_Of_Sequence     */\n    PV_MB_STUFFING,     /* hit Macroblock_Stuffing */\n    PV_END_OF_VOP,      /* hit End_of_Video_Object_Plane */\n    PV_END_OF_MB,       /* hit End_of_Macroblock */\n    PV_END_OF_BUF       /* hit End_of_Bitstream_Buffer */\n} PV_STATUS;\n\ntypedef UChar PIXEL;\n//typedef Int MOT;   /* : \"int\" type runs faster on RISC machine */\n\n#define HTFM            /*  3/2/01, Hypothesis Test Fast Matching for early drop-out*/\n//#define _MOVE_INTERFACE\n\n//#define RANDOM_REFSELCODE\n\n/* handle the case of devision by zero in RC */\n#define MAD_MIN 1\n\n/* 4/11/01, if SSE or MMX, no HTFM, no SAD_HP_FLY */\n\n/* Code size reduction related Macros */\n#ifdef H263_ONLY\n#ifndef NO_RVLC\n#define NO_RVLC\n#endif\n#ifndef NO_MPEG_QUANT\n#define NO_MPEG_QUANT\n#endif\n#ifndef NO_INTER4V\n#define NO_INTER4V\n#endif\n#endif\n/**************************************/\n\n#define TRUE    1\n#define FALSE   0\n\n#define PV_ABS(x)       (((x)<0)? -(x) : (x))\n#define PV_SIGN(x)      (((x)<0)? -1 : 1)\n#define PV_SIGN0(a)     (((a)<0)? -1 : (((a)>0) ? 1 : 0))\n#define PV_MAX(a,b)     ((a)>(b)? 
(a):(b))\n#define PV_MIN(a,b)     ((a)<(b)? (a):(b))\n\n#define MODE_INTRA      0\n#define MODE_INTER      1\n#define MODE_INTRA_Q    2\n#define MODE_INTER_Q    3\n#define MODE_INTER4V    4\n#define MODE_SKIPPED    6\n\n#define I_VOP       0\n#define P_VOP       1\n#define B_VOP       2\n\n/*09/04/00 Add MB height and width */\n#define MB_WIDTH 16\n#define MB_HEIGHT 16\n\n#define VOP_BRIGHT_WHITEENC 255\n\n\n#define LUMINANCE_DC_TYPE   1\n#define CHROMINANCE_DC_TYPE 2\n\n#define EOB_CODE                        1\n#define EOB_CODE_LENGTH                32\n\n/* 11/30/98 */\n#define FoundRM     1   /* Resync Marker */\n#define FoundVSC    2   /* VOP_START_CODE. */\n#define FoundGSC    3   /* GROUP_START_CODE */\n#define FoundEOB    4   /* EOB_CODE */\n\n\n/* 05/08/2000, the error code returned from BitstreamShowBits() */\n#define BITSTREAM_ERROR_CODE 0xFFFFFFFF\n\n/* PacketVideo \"absolution timestamp\" object.  06/13/2000 */\n#define PVTS_START_CODE         0x01C4\n#define PVTS_START_CODE_LENGTH  32\n\n/* session layer and vop layer start codes */\n\n#define SESSION_START_CODE  0x01B0\n#define SESSION_END_CODE    0x01B1\n#define VISUAL_OBJECT_START_CODE 0x01B5\n\n#define VO_START_CODE           0x8\n#define VO_HEADER_LENGTH        32      /* lengtho of VO header: VO_START_CODE +  VO_ID */\n\n#define SOL_START_CODE          0x01BE\n#define SOL_START_CODE_LENGTH   32\n\n#define VOL_START_CODE 0x12\n#define VOL_START_CODE_LENGTH 28\n\n#define VOP_START_CODE 0x1B6\n#define VOP_START_CODE_LENGTH   32\n\n#define GROUP_START_CODE    0x01B3\n#define GROUP_START_CODE_LENGTH  32\n\n#define VOP_ID_CODE_LENGTH      5\n#define VOP_TEMP_REF_CODE_LENGTH    16\n\n#define USER_DATA_START_CODE        0x01B2\n#define USER_DATA_START_CODE_LENGTH 32\n\n#define START_CODE_PREFIX       0x01\n#define START_CODE_PREFIX_LENGTH    24\n\n#define SHORT_VIDEO_START_MARKER         0x20\n#define SHORT_VIDEO_START_MARKER_LENGTH  22\n#define SHORT_VIDEO_END_MARKER            0x3F\n#define 
GOB_RESYNC_MARKER         0x01\n#define GOB_RESYNC_MARKER_LENGTH  17\n\n/* motion and resync markers used in error resilient mode  */\n\n#define DC_MARKER                      438273\n#define DC_MARKER_LENGTH                19\n\n#define MOTION_MARKER_COMB             126977\n#define MOTION_MARKER_COMB_LENGTH       17\n\n#define MOTION_MARKER_SEP              81921\n#define MOTION_MARKER_SEP_LENGTH        17\n\n#define RESYNC_MARKER           1\n#define RESYNC_MARKER_LENGTH    17\n\n#define SPRITE_NOT_USED     0\n#define STATIC_SPRITE       1\n#define ONLINE_SPRITE       2\n#define GMC_SPRITE      3\n\n/* macroblock and block size */\n#define MB_SIZE 16\n#define NCOEFF_MB (MB_SIZE*MB_SIZE)\n#define B_SIZE 8\n#define NCOEFF_BLOCK (B_SIZE*B_SIZE)\n#define NCOEFF_Y NCOEFF_MB\n#define NCOEFF_U NCOEFF_BLOCK\n#define NCOEFF_V NCOEFF_BLOCK\n\n/* overrun buffer size  */\n#define DEFAULT_OVERRUN_BUFFER_SIZE 1000\n\n\n/* VLC decoding related definitions */\n#define VLC_ERROR   (-1)\n#define VLC_ESCAPE  7167\n\n#endif /* _PVDECDEF_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/mp4enc_api.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#include \"mp4enc_lib.h\"\n#include \"bitstream_io.h\"\n#include \"rate_control.h\"\n#include \"m4venc_oscl.h\"\n\n\n/* Inverse normal zigzag */\nconst static Int zigzag_i[NCOEFF_BLOCK] =\n{\n    0, 1, 8, 16, 9, 2, 3, 10,\n    17, 24, 32, 25, 18, 11, 4, 5,\n    12, 19, 26, 33, 40, 48, 41, 34,\n    27, 20, 13, 6, 7, 14, 21, 28,\n    35, 42, 49, 56, 57, 50, 43, 36,\n    29, 22, 15, 23, 30, 37, 44, 51,\n    58, 59, 52, 45, 38, 31, 39, 46,\n    53, 60, 61, 54, 47, 55, 62, 63\n};\n\n/* INTRA */\nconst static Int mpeg_iqmat_def[NCOEFF_BLOCK] =\n    {  8, 17, 18, 19, 21, 23, 25, 27,\n       17, 18, 19, 21, 23, 25, 27, 28,\n       20, 21, 22, 23, 24, 26, 28, 30,\n       21, 22, 23, 24, 26, 28, 30, 32,\n       22, 23, 24, 26, 28, 30, 32, 35,\n       23, 24, 26, 28, 30, 32, 35, 38,\n       25, 26, 28, 30, 32, 35, 38, 41,\n       27, 28, 30, 32, 35, 38, 41, 45\n    };\n\n/* INTER */\nconst static Int mpeg_nqmat_def[64]  =\n    { 16, 17, 18, 19, 20, 21, 22, 23,\n      17, 18, 19, 20, 21, 22, 23, 24,\n      18, 19, 20, 21, 22, 23, 24, 25,\n      19, 20, 21, 22, 23, 24, 26, 27,\n      20, 21, 22, 23, 25, 26, 27, 28,\n      21, 22, 23, 24, 26, 27, 28, 30,\n      22, 23, 24, 26, 27, 28, 30, 31,\n      23, 24, 
25, 27, 28, 30, 31, 33\n    };\n\n/* Profiles and levels */\n/* Simple profile(level 0-3) and Core profile (level 1-2) */\n/* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2} , SPL0: Simple Profile@Level0, CPL1: Core Profile@Level1 */\nconst static Int profile_level_code[MAX_BASE_PROFILE+1] =\n{\n    0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x21, 0x22\n};\n\nconst static Int profile_level_max_bitrate[MAX_BASE_PROFILE+1] =\n{\n    64000, 64000, 128000, 384000, 4000000, 8000000, 384000, 2000000\n};\n\nconst static Int profile_level_max_packet_size[MAX_BASE_PROFILE+1] =\n{\n    2048, 2048, 4096, 8192, 16384, 16384, 4096, 8192\n};\n\nconst static Int profile_level_max_mbsPerSec[MAX_BASE_PROFILE+1] =\n{\n    1485, 1485, 5940, 11880, 36000, 40500, 5940, 23760\n};\n\nconst static Int profile_level_max_VBV_size[MAX_BASE_PROFILE+1] =\n{\n    163840, 163840, 655360, 655360, 1310720, 1835008, 262144, 1310720\n};\n\n\n/* Scalable profiles for nLayers = 2 */\n/* Simple scalable profile (level 0-2) and Core scalable profile (level 1-3) */\n/* {SSPL0, SSPL1, SSPL2, CSPL1, CSPL2, CSPL3} , SSPL0: Simple Scalable Profile@Level0, CSPL1: Core Scalable Profile@Level1, the fourth is redundant for easy table manipulation */\n\nconst static Int scalable_profile_level_code[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    0x10, 0x11, 0x12, 0xA1, 0xA2, 0xA3\n};\n\nconst static Int scalable_profile_level_max_bitrate[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    128000, 128000, 256000, 768000, 1500000, 4000000\n};\n\n/* in bits */\nconst static Int scalable_profile_level_max_packet_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    2048, 2048, 4096, 4096, 4096, 16384\n};\n\nconst static Int scalable_profile_level_max_mbsPerSec[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    1485, 7425, 23760, 14850, 29700, 120960\n};\n\nconst static Int scalable_profile_level_max_VBV_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    163840, 655360, 655360, 1048576, 1310720, 1310720\n};\n\n\n/* 
H263 profile 0 @ level 10-70 */\nconst static Int   h263Level[8] = {0, 10, 20, 30, 40, 50, 60, 70};\nconst static float rBR_bound[8] = {0, 1, 2, 6, 32, 64, 128, 256};\nconst static float max_h263_framerate[2] = {(float)30000 / (float)2002,\n        (float)30000 / (float)1001\n                                           };\nconst static Int   max_h263_width[2]  = {176, 352};\nconst static Int   max_h263_height[2] = {144, 288};\n\n/* 6/2/2001, newly added functions to make PVEncodeVop more readable. */\nInt DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime);\nvoid DetermineVopType(VideoEncData *video, Int currLayer);\nInt UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status);\nBool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized);\n\n#ifdef PRINT_RC_INFO\nextern FILE *facct;\nextern int tiTotalNumBitsGenerated;\nextern int iStuffBits;\n#endif\n\n#ifdef PRINT_EC\nextern FILE *fec;\n#endif\n\n\n/* ======================================================================== */\n/*  Function : PVGetDefaultEncOption()                                      */\n/*  Date     : 12/12/2005                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase)\n{\n    VideoEncOptions defaultUseCase = {H263_MODE, profile_level_max_packet_size[SIMPLE_PROFILE_LEVEL0] >> 3,\n                                      SIMPLE_PROFILE_LEVEL0, PV_OFF, 0, 1, 1000, 33, {144, 144}, {176, 176}, {15, 30}, {64000, 128000},\n                                      {10, 10}, {12, 12}, {0, 0}, CBR_1, 0.0, PV_OFF, -1, 0, PV_OFF, 16, PV_OFF, 0, PV_ON\n                                     };\n\n    OSCL_UNUSED_ARG(encUseCase); // unused for now. Later we can add more defaults setting and use this\n    // argument to select the right one.\n    /* in the future we can create more meaningful use-cases */\n    if (encOption == NULL)\n    {\n        return PV_FALSE;\n    }\n\n    M4VENC_MEMCPY(encOption, &defaultUseCase, sizeof(VideoEncOptions));\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVInitVideoEncoder()                                         */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Initialization of MP4 Encoder and VO bitstream               */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :  5/21/01, allocate only yChan and assign uChan & vChan   */\n/*              12/12/05, add encoding option as input argument         */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool    PVInitVideoEncoder(VideoEncControls *encoderControl, VideoEncOptions *encOption)\n{\n\n    Bool        status = PV_TRUE;\n    Int         nLayers, idx, i, j;\n    Int         max = 0, max_width = 0, max_height = 0, pitch, offset;\n    Int         size = 0, nTotalMB = 0;\n    VideoEncData *video;\n    Vol         *pVol;\n    VideoEncParams  *pEncParams;\n    Int         temp_w, temp_h, mbsPerSec;\n\n    /******************************************/\n    /*      this part use to be PVSetEncode() */\n    Int profile_table_index, *profile_level_table;\n    Int profile_level = encOption->profile_level;\n    Int PacketSize = encOption->packetSize << 3;\n    Int timeInc, timeIncRes;\n    float profile_max_framerate;\n    VideoEncParams *encParams;\n\n    if (encoderControl->videoEncoderData) /* this has been called */\n    {\n        if (encoderControl->videoEncoderInit) /* check if PVInitVideoEncoder() has been called  */\n        {\n            PVCleanUpVideoEncoder(encoderControl);\n            encoderControl->videoEncoderInit = 0;\n        }\n\n        M4VENC_FREE(encoderControl->videoEncoderData);\n        encoderControl->videoEncoderData = NULL;\n    }\n    encoderControl->videoEncoderInit = 0;   /* reset this value */\n\n    video = (VideoEncData *)M4VENC_MALLOC(sizeof(VideoEncData)); /* allocate memory for encData */\n\n    if (video == NULL)\n        return PV_FALSE;\n\n    M4VENC_MEMSET(video, 0, sizeof(VideoEncData));\n\n    encoderControl->videoEncoderData = (void *) video;         /* set up pointer in VideoEncData structure */\n\n    video->encParams = (VideoEncParams *)M4VENC_MALLOC(sizeof(VideoEncParams));\n    if (video->encParams == NULL)\n        goto CLEAN_UP;\n\n    M4VENC_MEMSET(video->encParams, 
0, sizeof(VideoEncParams));\n\n    encParams = video->encParams;\n    encParams->nLayers = encOption->numLayers;\n\n    /* Check whether the input packetsize is valid (Note: put code here (before any memory allocation) in order to avoid memory leak */\n    if ((Int)profile_level <= (Int)(MAX_BASE_PROFILE))  /* non-scalable profile */\n    {\n        profile_level_table = (Int *)profile_level_max_packet_size;\n        profile_table_index = (Int)profile_level;\n        if (encParams->nLayers != 1)\n        {\n            goto CLEAN_UP;\n        }\n\n        encParams->LayerMaxMbsPerSec[0] = profile_level_max_mbsPerSec[profile_table_index];\n\n    }\n    else   /* scalable profile */\n    {\n        profile_level_table = (Int *)scalable_profile_level_max_packet_size;\n        profile_table_index = (Int)profile_level - (Int)(MAX_BASE_PROFILE) - 1;\n        if (encParams->nLayers < 2)\n        {\n            goto CLEAN_UP;\n        }\n        for (i = 0; i < encParams->nLayers; i++)\n        {\n            encParams->LayerMaxMbsPerSec[i] = scalable_profile_level_max_mbsPerSec[profile_table_index];\n        }\n\n    }\n\n    /* cannot have zero size packet with these modes */\n    if (PacketSize == 0)\n    {\n        if (encOption->encMode == DATA_PARTITIONING_MODE)\n        {\n            goto CLEAN_UP;\n        }\n        if (encOption->encMode == COMBINE_MODE_WITH_ERR_RES)\n        {\n            encOption->encMode = COMBINE_MODE_NO_ERR_RES;\n        }\n    }\n\n    if (encOption->gobHeaderInterval == 0)\n    {\n        if (encOption->encMode == H263_MODE_WITH_ERR_RES)\n        {\n            encOption->encMode = H263_MODE;\n        }\n\n        if (encOption->encMode == SHORT_HEADER_WITH_ERR_RES)\n        {\n            encOption->encMode = SHORT_HEADER;\n        }\n    }\n\n    if (PacketSize > profile_level_table[profile_table_index])\n        goto CLEAN_UP;\n\n    /* Initial Defaults for all Modes */\n\n    encParams->SequenceStartCode = 1;\n    
encParams->GOV_Enabled = 0;\n    encParams->RoundingType = 0;\n    encParams->IntraDCVlcThr = PV_MAX(PV_MIN(encOption->intraDCVlcTh, 7), 0);\n    encParams->ACDCPrediction = ((encOption->useACPred == PV_ON) ? TRUE : FALSE);\n    encParams->RC_Type = encOption->rcType;\n    encParams->Refresh = encOption->numIntraMB;\n    encParams->ResyncMarkerDisable = 0; /* Enable Resync Marker */\n\n    for (i = 0; i < encOption->numLayers; i++)\n    {\n#ifdef NO_MPEG_QUANT\n        encParams->QuantType[i] = 0;\n#else\n        encParams->QuantType[i] = encOption->quantType[i];      /* H263 */\n#endif\n        if (encOption->pQuant[i] >= 1 && encOption->pQuant[i] <= 31)\n        {\n            encParams->InitQuantPvop[i] = encOption->pQuant[i];\n        }\n        else\n        {\n            goto CLEAN_UP;\n        }\n        if (encOption->iQuant[i] >= 1 && encOption->iQuant[i] <= 31)\n        {\n            encParams->InitQuantIvop[i] = encOption->iQuant[i];\n        }\n        else\n        {\n            goto CLEAN_UP;\n        }\n    }\n\n    encParams->HalfPel_Enabled = 1;\n    encParams->SearchRange = encOption->searchRange; /* 4/16/2001 */\n    encParams->FullSearch_Enabled = 0;\n#ifdef NO_INTER4V\n    encParams->MV8x8_Enabled = 0;\n#else\n    encParams->MV8x8_Enabled = 0;// comment out for now!! 
encOption->mv8x8Enable;\n#endif\n    encParams->H263_Enabled = 0;\n    encParams->GOB_Header_Interval = 0; // need to be reset to 0\n    encParams->IntraPeriod = encOption->intraPeriod;    /* Intra update period update default*/\n    encParams->SceneChange_Det = encOption->sceneDetect;\n    encParams->FineFrameSkip_Enabled = 0;\n    encParams->NoFrameSkip_Enabled = encOption->noFrameSkipped;\n    encParams->NoPreSkip_Enabled = encOption->noFrameSkipped;\n    encParams->GetVolHeader[0] = 0;\n    encParams->GetVolHeader[1] = 0;\n    encParams->ResyncPacketsize = encOption->packetSize << 3;\n    encParams->LayerMaxBitRate[0] = 0;\n    encParams->LayerMaxBitRate[1] = 0;\n    encParams->LayerMaxFrameRate[0] = (float)0.0;\n    encParams->LayerMaxFrameRate[1] = (float)0.0;\n    encParams->VBV_delay = encOption->vbvDelay;  /* 2sec VBV buffer size */\n\n    switch (encOption->encMode)\n    {\n\n        case SHORT_HEADER:\n        case SHORT_HEADER_WITH_ERR_RES:\n\n            /* From Table 6-26 */\n            encParams->nLayers = 1;\n            encParams->QuantType[0] = 0;    /*H263 */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */\n            encParams->DataPartitioning = 0; /* Combined Mode */\n            encParams->ReversibleVLC = 0;   /* Disable RVLC */\n            encParams->RoundingType = 0;\n            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */\n            encParams->MV8x8_Enabled = 0;\n\n            encParams->GOB_Header_Interval = encOption->gobHeaderInterval;\n            encParams->H263_Enabled = 2;\n            encParams->GOV_Enabled = 0;\n            encParams->TimeIncrementRes = 30000;        /* timeIncrementRes for H263 */\n            break;\n\n        case H263_MODE:\n        case H263_MODE_WITH_ERR_RES:\n\n            /* From Table 6-26 */\n            encParams->nLayers = 1;\n            encParams->QuantType[0] = 0;    /*H263 */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker 
*/\n            encParams->DataPartitioning = 0; /* Combined Mode */\n            encParams->ReversibleVLC = 0;   /* Disable RVLC */\n            encParams->RoundingType = 0;\n            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */\n            encParams->MV8x8_Enabled = 0;\n\n            encParams->H263_Enabled = 1;\n            encParams->GOV_Enabled = 0;\n            encParams->TimeIncrementRes = 30000;        /* timeIncrementRes for H263 */\n\n            break;\n#ifndef H263_ONLY\n        case DATA_PARTITIONING_MODE:\n\n            encParams->DataPartitioning = 1;        /* Base Layer Data Partitioning */\n            encParams->ResyncMarkerDisable = 0; /* Resync Marker */\n#ifdef NO_RVLC\n            encParams->ReversibleVLC = 0;\n#else\n            encParams->ReversibleVLC = (encOption->rvlcEnable == PV_ON); /* RVLC when Data Partitioning */\n#endif\n            encParams->ResyncPacketsize = PacketSize;\n            break;\n\n        case COMBINE_MODE_WITH_ERR_RES:\n\n            encParams->DataPartitioning = 0;        /* Combined Mode */\n            encParams->ResyncMarkerDisable = 0; /* Resync Marker */\n            encParams->ReversibleVLC = 0;           /* No RVLC */\n            encParams->ResyncPacketsize = PacketSize;\n            break;\n\n        case COMBINE_MODE_NO_ERR_RES:\n\n            encParams->DataPartitioning = 0;        /* Combined Mode */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */\n            encParams->ReversibleVLC = 0;           /* No RVLC */\n            break;\n#endif\n        default:\n            goto CLEAN_UP;\n    }\n    /* Set the constraints (maximum values) according to the input profile and level */\n    /* Note that profile_table_index is already figured out above */\n\n    /* base layer */\n    encParams->profile_table_index    = profile_table_index; /* Used to limit the profile and level in SetProfile_BufferSize() */\n\n    /* check timeIncRes */\n    timeIncRes = 
encOption->timeIncRes;\n    timeInc = encOption->tickPerSrc;\n\n    if ((timeIncRes >= 1) && (timeIncRes <= 65536) && (timeInc < timeIncRes) && (timeInc != 0))\n    {\n\t\t/* AGI RCS 08/12/09 */\n    \tencParams->TimeIncrementRes = timeIncRes;\n        video->FrameRate = timeIncRes / ((float)timeInc);\n    }\n    else\n    {\n        goto CLEAN_UP;\n    }\n\n    /* check frame dimension */\n    if (encParams->H263_Enabled)\n    {\n        switch (encOption->encWidth[0])\n        {\n            case 128:\n                if (encOption->encHeight[0] != 96) /* source_format = 1 */\n                    goto CLEAN_UP;\n                break;\n            case 176:\n                if (encOption->encHeight[0] != 144) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n            case 352:\n                if (encOption->encHeight[0] != 288) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n\n            case 704:\n                if (encOption->encHeight[0] != 576) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n            case 1408:\n                if (encOption->encHeight[0] != 1152) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n\n            default:\n                goto CLEAN_UP;\n        }\n    }\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerHeight[i] = encOption->encHeight[i];\n        encParams->LayerWidth[i] = encOption->encWidth[i];\n    }\n\n    /* check frame rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerFrameRate[i] = encOption->encFrameRate[i];\n    }\n\n    if (encParams->nLayers > 1)\n    {\n        if (encOption->encFrameRate[0] == encOption->encFrameRate[1] ||\n                encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) 
/* 7/31/03 */\n            goto CLEAN_UP;\n    }\n    /* set max frame rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n\n        /* Make sure the maximum framerate is consistent with the given profile and level */\n        nTotalMB = ((encParams->LayerWidth[i] + 15) / 16) * ((encParams->LayerHeight[i] + 15) / 16);\n\n        if (nTotalMB > 0)\n            profile_max_framerate = (float)encParams->LayerMaxMbsPerSec[i] / (float)nTotalMB;\n\n        else\n            profile_max_framerate = (float)30.0;\n\n        encParams->LayerMaxFrameRate[i] = PV_MIN(profile_max_framerate, encParams->LayerFrameRate[i]);\n    }\n\n    /* check bit rate */\n    /* set max bit rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerBitRate[i] = encOption->bitRate[i];\n        encParams->LayerMaxBitRate[i] = encOption->bitRate[i];\n    }\n    if (encParams->nLayers > 1)\n    {\n        if (encOption->bitRate[0] == encOption->bitRate[1] ||\n                encOption->bitRate[0] == 0 || encOption->bitRate[1] == 0) /* 7/31/03 */\n            goto CLEAN_UP;\n    }\n    /* check rate control and vbv delay*/\n    encParams->RC_Type = encOption->rcType;\n\n    if (encOption->vbvDelay == 0.0) /* set to default */\n    {\n        switch (encOption->rcType)\n        {\n            case CBR_1:\n            case CBR_2:\n                encParams->VBV_delay = (float)2.0; /* default 2sec VBV buffer size */\n                break;\n\n            case CBR_LOWDELAY:\n                encParams->VBV_delay = (float)0.5; /* default 0.5sec VBV buffer size */\n                break;\n\n            case VBR_1:\n            case VBR_2:\n                encParams->VBV_delay = (float)10.0; /* default 10sec VBV buffer size */\n                break;\n            default:\n                break;\n        }\n    }\n    else /* force this value */\n    {\n        encParams->VBV_delay = encOption->vbvDelay;\n    }\n\n    /* check search range */\n    if 
(encParams->H263_Enabled && encOption->searchRange > 16)\n    {\n        encParams->SearchRange = 16; /* 4/16/2001 */\n    }\n\n    /*****************************************/\n    /* checking for conflict between options */\n    /*****************************************/\n\n    if (video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2 || video->encParams->RC_Type == CBR_LOWDELAY)  /* if CBR */\n    {\n#ifdef _PRINT_STAT\n        if (video->encParams->NoFrameSkip_Enabled == PV_ON ||\n                video->encParams->NoPreSkip_Enabled == PV_ON) /* don't allow frame skip*/\n            printf(\"WARNING!!!! CBR with NoFrameSkip\\n\");\n#endif\n    }\n    else if (video->encParams->RC_Type == CONSTANT_Q)   /* constant_Q */\n    {\n        video->encParams->NoFrameSkip_Enabled = PV_ON;  /* no frame skip */\n        video->encParams->NoPreSkip_Enabled = PV_ON;    /* no frame skip */\n#ifdef _PRINT_STAT\n        printf(\"Turn on NoFrameSkip\\n\");\n#endif\n    }\n\n    if (video->encParams->NoFrameSkip_Enabled == PV_ON) /* if no frame skip */\n    {\n        video->encParams->FineFrameSkip_Enabled = PV_OFF;\n#ifdef _PRINT_STAT\n        printf(\"NoFrameSkip !!! 
may violate VBV_BUFFER constraint.\\n\");\n        printf(\"Turn off FineFrameSkip\\n\");\n#endif\n    }\n\n    /******************************************/\n    /******************************************/\n\n    nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */\n\n    /* Find the maximum width*height for memory allocation of the VOPs */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        temp_w = video->encParams->LayerWidth[idx];\n        temp_h = video->encParams->LayerHeight[idx];\n\n        if ((temp_w*temp_h) > max)\n        {\n            max = temp_w * temp_h;\n            max_width = ((temp_w + 15) >> 4) << 4;\n            max_height = ((temp_h + 15) >> 4) << 4;\n            nTotalMB = ((max_width * max_height) >> 8);\n        }\n\n        /* Check if the video size and framerate(MBsPerSec) are vald */\n        mbsPerSec = (Int)(nTotalMB * video->encParams->LayerFrameRate[idx]);\n        if (mbsPerSec > video->encParams->LayerMaxMbsPerSec[idx]) status = PV_FALSE;\n    }\n\n    /****************************************************/\n    /* Set Profile and Video Buffer Size for each layer */\n    /****************************************************/\n    if (video->encParams->RC_Type == CBR_LOWDELAY) video->encParams->VBV_delay = 0.5; /* For CBR_LOWDELAY, we set 0.5sec buffer */\n    status = SetProfile_BufferSize(video, video->encParams->VBV_delay, 1);\n    if (status != PV_TRUE)\n        goto CLEAN_UP;\n\n    /****************************************/\n    /* memory allocation and initialization */\n    /****************************************/\n\n    if (video == NULL) goto CLEAN_UP;\n\n    /* cyclic reference for passing through both structures */\n    video->videoEncControls = encoderControl;\n\n    //video->currLayer = 0; /* Set current Layer to 0 */\n    //video->currFrameNo = 0; /* Set current frame Number to 0 */\n    video->nextModTime = 0;\n    video->nextEncIVop = 0; /* Sets up very first frame to be I-VOP! 
*/\n    video->numVopsInGOP = 0; /* counter for Vops in Gop, 2/8/01 */\n\n    //video->frameRate = video->encParams->LayerFrameRate[0]; /* Set current layer frame rate */\n\n    video->QPMB = (UChar *) M4VENC_MALLOC(nTotalMB * sizeof(UChar)); /* Memory for MB quantizers */\n    if (video->QPMB == NULL) goto CLEAN_UP;\n\n\n    video->headerInfo.Mode = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB); /* Memory for MB Modes */\n    if (video->headerInfo.Mode == NULL) goto CLEAN_UP;\n    video->headerInfo.CBP = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB);   /* Memory for CBP (Y and C) of each MB */\n    if (video->headerInfo.CBP == NULL) goto CLEAN_UP;\n\n    /* Allocating motion vector space and interpolation memory*/\n\n    video->mot = (MOT **)M4VENC_MALLOC(sizeof(MOT *) * nTotalMB);\n    if (video->mot == NULL) goto CLEAN_UP;\n\n    for (idx = 0; idx < nTotalMB; idx++)\n    {\n        video->mot[idx] = (MOT *)M4VENC_MALLOC(sizeof(MOT) * 8);\n        if (video->mot[idx] == NULL)\n        {\n            goto CLEAN_UP;\n        }\n    }\n\n    video->intraArray = (UChar *)M4VENC_MALLOC(sizeof(UChar) * nTotalMB);\n    if (video->intraArray == NULL) goto CLEAN_UP;\n\n    video->sliceNo = (UChar *) M4VENC_MALLOC(nTotalMB); /* Memory for Slice Numbers */\n    if (video->sliceNo == NULL) goto CLEAN_UP;\n    /* Allocating space for predDCAC[][8][16], Not that I intentionally  */\n    /*    increase the dimension of predDCAC from [][6][15] to [][8][16] */\n    /*    so that compilers can generate faster code to indexing the     */\n    /*    data inside (by using << instead of *).         04/14/2000. */\n    /* 5/29/01, use  decoder lib ACDC prediction memory scheme.  
*/\n    video->predDC = (typeDCStore *) M4VENC_MALLOC(nTotalMB * sizeof(typeDCStore));\n    if (video->predDC == NULL) goto CLEAN_UP;\n\n    if (!video->encParams->H263_Enabled)\n    {\n        video->predDCAC_col = (typeDCACStore *) M4VENC_MALLOC(((max_width >> 4) + 1) * sizeof(typeDCACStore));\n        if (video->predDCAC_col == NULL) goto CLEAN_UP;\n\n        /* element zero will be used for storing vertical (col) AC coefficients */\n        /*  the rest will be used for storing horizontal (row) AC coefficients  */\n        video->predDCAC_row = video->predDCAC_col + 1;        /*  ACDC */\n\n        video->acPredFlag = (Int *) M4VENC_MALLOC(nTotalMB * sizeof(Int)); /* Memory for acPredFlag */\n        if (video->acPredFlag == NULL) goto CLEAN_UP;\n    }\n\n    video->outputMB = (MacroBlock *) M4VENC_MALLOC(sizeof(MacroBlock)); /* Allocating macroblock space */\n    if (video->outputMB == NULL) goto CLEAN_UP;\n    M4VENC_MEMSET(video->outputMB->block[0], 0, (sizeof(Short) << 6)*6);\n\n    M4VENC_MEMSET(video->dataBlock, 0, sizeof(Short) << 7);\n    /* Allocate (2*packetsize) working bitstreams */\n\n    video->bitstream1 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 1*/\n    if (video->bitstream1 == NULL) goto CLEAN_UP;\n    video->bitstream2 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 2*/\n    if (video->bitstream2 == NULL) goto CLEAN_UP;\n    video->bitstream3 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 3*/\n    if (video->bitstream3 == NULL) goto CLEAN_UP;\n\n    /* allocate overrun buffer */\n    // this buffer is used when user's buffer is too small to hold one frame.\n    // It is not needed for slice-based encoding.\n    if (nLayers == 1)\n    {\n        video->oBSize = encParams->BufferSize[0] >> 3;\n    }\n    else\n    {\n        video->oBSize = PV_MAX((encParams->BufferSize[0] >> 3), (encParams->BufferSize[1] >> 3));\n    }\n\n    if (video->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE || encParams->RC_Type == 
CONSTANT_Q) // set limit\n    {\n        video->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;\n    }\n    video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * video->oBSize);\n    if (video->overrunBuffer == NULL) goto CLEAN_UP;\n\n\n    video->currVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Current VOP */\n    if (video->currVop == NULL) goto CLEAN_UP;\n\n    /* add padding, 09/19/05 */\n    if (video->encParams->H263_Enabled) /* make it conditional  11/28/05 */\n    {\n        pitch = max_width;\n        offset = 0;\n    }\n    else\n    {\n        pitch = max_width + 32;\n        offset = (pitch << 4) + 16;\n        max_height += 32;\n    }\n    size = pitch * max_height;\n\n    video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */\n    if (video->currVop->yChan == NULL) goto CLEAN_UP;\n    video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */\n    video->currVop->vChan = video->currVop->uChan + (size >> 2);/* Memory for currVop V */\n\n    /* shift for the offset */\n    if (offset)\n    {\n        video->currVop->yChan += offset; /* offset to the origin.*/\n        video->currVop->uChan += (offset >> 2) + 4;\n        video->currVop->vChan += (offset >> 2) + 4;\n    }\n\n    video->forwardRefVop = video->currVop;      /*  Initialize forwardRefVop */\n    video->backwardRefVop = video->currVop;     /*  Initialize backwardRefVop */\n\n    video->prevBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Previous Base Vop */\n    if (video->prevBaseVop == NULL) goto CLEAN_UP;\n    video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */\n    if (video->prevBaseVop->yChan == NULL) goto CLEAN_UP;\n    video->prevBaseVop->uChan = video->prevBaseVop->yChan + size; /* Memory for prevBaseVop U */\n    video->prevBaseVop->vChan = video->prevBaseVop->uChan + (size >> 2); /* Memory for prevBaseVop V 
*/\n\n    if (offset)\n    {\n        video->prevBaseVop->yChan += offset; /* offset to the origin.*/\n        video->prevBaseVop->uChan += (offset >> 2) + 4;\n        video->prevBaseVop->vChan += (offset >> 2) + 4;\n    }\n\n\n    if (0) /* If B Frames */\n    {\n        video->nextBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Next Base Vop */\n        if (video->nextBaseVop == NULL) goto CLEAN_UP;\n        video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */\n        if (video->nextBaseVop->yChan == NULL) goto CLEAN_UP;\n        video->nextBaseVop->uChan = video->nextBaseVop->yChan + size; /* Memory for nextBaseVop U */\n        video->nextBaseVop->vChan = video->nextBaseVop->uChan + (size >> 2); /* Memory for nextBaseVop V */\n\n        if (offset)\n        {\n            video->nextBaseVop->yChan += offset; /* offset to the origin.*/\n            video->nextBaseVop->uChan += (offset >> 2) + 4;\n            video->nextBaseVop->vChan += (offset >> 2) + 4;\n        }\n    }\n\n    if (nLayers > 1)   /* If enhancement layers */\n    {\n        video->prevEnhanceVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));      /* Memory for Previous Enhancement Vop */\n        if (video->prevEnhanceVop == NULL) goto CLEAN_UP;\n        video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */\n        if (video->prevEnhanceVop->yChan == NULL) goto CLEAN_UP;\n        video->prevEnhanceVop->uChan = video->prevEnhanceVop->yChan + size; /* Memory for Previous Enhancement U */\n        video->prevEnhanceVop->vChan = video->prevEnhanceVop->uChan + (size >> 2); /* Memory for Previous Enhancement V */\n\n        if (offset)\n        {\n            video->prevEnhanceVop->yChan += offset; /* offset to the origin.*/\n            video->prevEnhanceVop->uChan += (offset >> 2) + 4;\n            video->prevEnhanceVop->vChan += (offset >> 
2) + 4;\n        }\n    }\n\n    video->numberOfLayers = nLayers; /* Number of Layers */\n    video->sumMAD = 0;\n\n\n    /* 04/09/01, for Vops in the use multipass processing */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        video->pMP[idx] = (MultiPass *)M4VENC_MALLOC(sizeof(MultiPass));\n        if (video->pMP[idx] == NULL)    goto CLEAN_UP;\n        M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));\n\n        video->pMP[idx]->encoded_frames = -1; /* forget about the very first I frame */\n\n\n        /* RDInfo **pRDSamples */\n        video->pMP[idx]->pRDSamples = (RDInfo **)M4VENC_MALLOC(30 * sizeof(RDInfo *));\n        if (video->pMP[idx]->pRDSamples == NULL)    goto CLEAN_UP;\n        for (i = 0; i < 30; i++)\n        {\n            video->pMP[idx]->pRDSamples[i] = (RDInfo *)M4VENC_MALLOC(32 * sizeof(RDInfo));\n            if (video->pMP[idx]->pRDSamples[i] == NULL) goto CLEAN_UP;\n            for (j = 0; j < 32; j++)    M4VENC_MEMSET(&(video->pMP[idx]->pRDSamples[i][j]), 0, sizeof(RDInfo));\n        }\n        video->pMP[idx]->frameRange = (Int)(video->encParams->LayerFrameRate[idx] * 1.0); /* 1.0s time frame*/\n        video->pMP[idx]->frameRange = PV_MAX(video->pMP[idx]->frameRange, 5);\n        video->pMP[idx]->frameRange = PV_MIN(video->pMP[idx]->frameRange, 30);\n\n        video->pMP[idx]->framePos = -1;\n\n    }\n    /* /// End /////////////////////////////////////// */\n\n\n    video->vol = (Vol **)M4VENC_MALLOC(nLayers * sizeof(Vol *)); /* Memory for VOL pointers */\n\n    /* Memory allocation and Initialization of Vols and writing of headers */\n    if (video->vol == NULL) goto CLEAN_UP;\n\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        video->volInitialize[idx] = 1;\n        video->refTick[idx] = 0;\n        video->relLayerCodeTime[idx] = 1000;\n        video->vol[idx] = (Vol *)M4VENC_MALLOC(sizeof(Vol));\n        if (video->vol[idx] == NULL)  goto CLEAN_UP;\n\n        pVol = video->vol[idx];\n        pEncParams = 
video->encParams;\n\n        M4VENC_MEMSET(video->vol[idx], 0, sizeof(Vol));\n        /* Initialize some VOL parameters */\n        pVol->volID = idx;  /* Set VOL ID */\n        pVol->shortVideoHeader = pEncParams->H263_Enabled; /*Short Header */\n        pVol->GOVStart = pEncParams->GOV_Enabled; /* GOV Header */\n        pVol->timeIncrementResolution = video->encParams->TimeIncrementRes;\n        pVol->nbitsTimeIncRes = 1;\n        while (pVol->timeIncrementResolution > (1 << pVol->nbitsTimeIncRes))\n        {\n            pVol->nbitsTimeIncRes++;\n        }\n\n        /* timing stuff */\n        pVol->timeIncrement = 0;\n        pVol->moduloTimeBase = 0;\n        pVol->fixedVopRate = 0; /* No fixed VOP rate */\n        pVol->stream = (BitstreamEncVideo *)M4VENC_MALLOC(sizeof(BitstreamEncVideo)); /* allocate BitstreamEncVideo Instance */\n        if (pVol->stream == NULL)  goto CLEAN_UP;\n\n        pVol->width = pEncParams->LayerWidth[idx];      /* Layer Width */\n        pVol->height = pEncParams->LayerHeight[idx];    /* Layer Height */\n        //  pVol->intra_acdcPredDisable = pEncParams->ACDCPrediction; /* ACDC Prediction */\n        pVol->ResyncMarkerDisable = pEncParams->ResyncMarkerDisable; /* Resync Marker Mode */\n        pVol->dataPartitioning = pEncParams->DataPartitioning; /* Data Partitioning */\n        pVol->useReverseVLC = pEncParams->ReversibleVLC; /* RVLC */\n        if (idx > 0) /* Scalability layers */\n        {\n            pVol->ResyncMarkerDisable = 1;\n            pVol->dataPartitioning = 0;\n            pVol->useReverseVLC = 0; /*  No RVLC */\n        }\n        pVol->quantType = pEncParams->QuantType[idx];           /* Quantizer Type */\n\n        /* no need to init Quant Matrices */\n\n        pVol->scalability = 0;  /* Vol Scalability */\n        if (idx > 0)\n            pVol->scalability = 1; /* Multiple layers => Scalability */\n\n        /* Initialize Vol to Temporal scalability.  
It can change during encoding */\n        pVol->scalType = 1;\n        /* Initialize reference Vol ID to the base layer = 0 */\n        pVol->refVolID = 0;\n        /* Initialize layer resolution to same as the reference */\n        pVol->refSampDir = 0;\n        pVol->horSamp_m = 1;\n        pVol->horSamp_n = 1;\n        pVol->verSamp_m = 1;\n        pVol->verSamp_n = 1;\n        pVol->enhancementType = 0; /* We always enhance the entire region */\n\n        pVol->nMBPerRow = (pVol->width + 15) / 16;\n        pVol->nMBPerCol = (pVol->height + 15) / 16;\n        pVol->nTotalMB = pVol->nMBPerRow * pVol->nMBPerCol;\n\n        if (pVol->nTotalMB >= 1)\n            pVol->nBitsForMBID = 1;\n        if (pVol->nTotalMB >= 3)\n            pVol->nBitsForMBID = 2;\n        if (pVol->nTotalMB >= 5)\n            pVol->nBitsForMBID = 3;\n        if (pVol->nTotalMB >= 9)\n            pVol->nBitsForMBID = 4;\n        if (pVol->nTotalMB >= 17)\n            pVol->nBitsForMBID = 5;\n        if (pVol->nTotalMB >= 33)\n            pVol->nBitsForMBID = 6;\n        if (pVol->nTotalMB >= 65)\n            pVol->nBitsForMBID = 7;\n        if (pVol->nTotalMB >= 129)\n            pVol->nBitsForMBID = 8;\n        if (pVol->nTotalMB >= 257)\n            pVol->nBitsForMBID = 9;\n        if (pVol->nTotalMB >= 513)\n            pVol->nBitsForMBID = 10;\n        if (pVol->nTotalMB >= 1025)\n            pVol->nBitsForMBID = 11;\n        if (pVol->nTotalMB >= 2049)\n            pVol->nBitsForMBID = 12;\n        if (pVol->nTotalMB >= 4097)\n            pVol->nBitsForMBID = 13;\n        if (pVol->nTotalMB >= 8193)\n            pVol->nBitsForMBID = 14;\n        if (pVol->nTotalMB >= 16385)\n            pVol->nBitsForMBID = 15;\n        if (pVol->nTotalMB >= 32769)\n            pVol->nBitsForMBID = 16;\n        if (pVol->nTotalMB >= 65537)\n            pVol->nBitsForMBID = 17;\n        if (pVol->nTotalMB >= 131073)\n            pVol->nBitsForMBID = 18;\n\n        if (pVol->shortVideoHeader)\n        {\n 
           switch (pVol->width)\n            {\n                case 128:\n                    if (pVol->height == 96)  /* source_format = 1 */\n                    {\n                        pVol->nGOBinVop = 6;\n                        pVol->nMBinGOB = 8;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                case 176:\n                    if (pVol->height == 144)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 9;\n                        pVol->nMBinGOB = 11;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n                case 352:\n                    if (pVol->height == 288)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 22;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                case 704:\n                    if (pVol->height == 576)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 88;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n                case 1408:\n                    if (pVol->height == 1152)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 352;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                default:\n                    status = PV_FALSE;\n                    break;\n            }\n        }\n    }\n\n    /***************************************************/\n    /* allocate and initialize rate control parameters */\n    
/***************************************************/\n\n    /* BEGIN INITIALIZATION OF ANNEX L RATE CONTROL */\n    if (video->encParams->RC_Type != CONSTANT_Q)\n    {\n        for (idx = 0; idx < nLayers; idx++) /* 12/25/00 */\n        {\n            video->rc[idx] =\n                (rateControl *)M4VENC_MALLOC(sizeof(rateControl));\n\n            if (video->rc[idx] == NULL) goto CLEAN_UP;\n\n            M4VENC_MEMSET(video->rc[idx], 0, sizeof(rateControl));\n        }\n        if (PV_SUCCESS != RC_Initialize(video))\n        {\n            goto CLEAN_UP;\n        }\n        /* initialization for 2-pass rate control */\n    }\n    /* END INITIALIZATION OF ANNEX L RATE CONTROL */\n\n    /********** assign platform dependent functions ***********************/\n    /* 1/23/01 */\n    /* This must be done at run-time not a compile time */\n    video->functionPointer = (FuncPtr*) M4VENC_MALLOC(sizeof(FuncPtr));\n    if (video->functionPointer == NULL) goto CLEAN_UP;\n\n    video->functionPointer->ComputeMBSum = &ComputeMBSum_C;\n    video->functionPointer->SAD_MB_HalfPel[0] = NULL;\n    video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HalfPel_Cxh;\n    video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HalfPel_Cyh;\n    video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HalfPel_Cxhyh;\n\n#ifndef NO_INTER4V\n    video->functionPointer->SAD_Blk_HalfPel = &SAD_Blk_HalfPel_C;\n    video->functionPointer->SAD_Block = &SAD_Block_C;\n#endif\n    video->functionPointer->SAD_Macroblock = &SAD_Macroblock_C;\n    video->functionPointer->ChooseMode = &ChooseMode_C;\n    video->functionPointer->GetHalfPelMBRegion = &GetHalfPelMBRegion_C;\n//  video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING; /* 4/21/01 */\n\n\n    encoderControl->videoEncoderInit = 1;  /* init done! 
*/\n\n    return PV_TRUE;\n\nCLEAN_UP:\n    PVCleanUpVideoEncoder(encoderControl);\n\n    return PV_FALSE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVCleanUpVideoEncoder()                                      */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Deallocates allocated memory from InitVideoEncoder()         */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified : 5/21/01, free only yChan in Vop                          */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool    PVCleanUpVideoEncoder(VideoEncControls *encoderControl)\n{\n    Int idx, i;\n    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;\n    int nTotalMB;\n    int max_width, offset;\n\n#ifdef PRINT_RC_INFO\n    if (facct != NULL)\n    {\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUM BITS GENERATED %d\\n\", tiTotalNumBitsGenerated);\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF FRAMES CODED %d\\n\",\n                video->encParams->rc[0]->totalFrameNumber);\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"Average BitRate %d\\n\",\n                (tiTotalNumBitsGenerated / (90 / 30)));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF STUFF BITS %d\\n\", (iStuffBits + 10740));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF BITS TO 
NETWORK %d\\n\", (35800*90 / 30));;\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"SUM OF STUFF BITS AND GENERATED BITS %d\\n\",\n                (tiTotalNumBitsGenerated + iStuffBits + 10740));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"UNACCOUNTED DIFFERENCE %d\\n\",\n                ((35800*90 / 30) - (tiTotalNumBitsGenerated + iStuffBits + 10740)));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fclose(facct);\n    }\n#endif\n\n#ifdef PRINT_EC\n    fclose(fec);\n#endif\n\n    if (video != NULL)\n    {\n\n        if (video->QPMB) M4VENC_FREE(video->QPMB);\n        if (video->headerInfo.Mode)M4VENC_FREE(video->headerInfo.Mode);\n        if (video->headerInfo.CBP)M4VENC_FREE(video->headerInfo.CBP);\n\n\n        if (video->mot)\n        {\n            nTotalMB = video->vol[0]->nTotalMB;\n            for (idx = 1; idx < video->currLayer; idx++)\n                if (video->vol[idx]->nTotalMB > nTotalMB)\n                    nTotalMB = video->vol[idx]->nTotalMB;\n            for (idx = 0; idx < nTotalMB; idx++)\n            {\n                if (video->mot[idx])\n                    M4VENC_FREE(video->mot[idx]);\n            }\n            M4VENC_FREE(video->mot);\n        }\n\n        if (video->intraArray) M4VENC_FREE(video->intraArray);\n\n        if (video->sliceNo)M4VENC_FREE(video->sliceNo);\n        if (video->acPredFlag)M4VENC_FREE(video->acPredFlag);\n//      if(video->predDCAC)M4VENC_FREE(video->predDCAC);\n        if (video->predDC) M4VENC_FREE(video->predDC);\n        video->predDCAC_row = NULL;\n        if (video->predDCAC_col) M4VENC_FREE(video->predDCAC_col);\n        if (video->outputMB)M4VENC_FREE(video->outputMB);\n\n        if (video->bitstream1)BitstreamCloseEnc(video->bitstream1);\n        if (video->bitstream2)BitstreamCloseEnc(video->bitstream2);\n        if 
(video->bitstream3)BitstreamCloseEnc(video->bitstream3);\n\n        if (video->overrunBuffer) M4VENC_FREE(video->overrunBuffer);\n\n        max_width = video->encParams->LayerWidth[0];\n        max_width = (((max_width + 15) >> 4) << 4); /* 09/19/05 */\n        if (video->encParams->H263_Enabled)\n        {\n            offset = 0;\n        }\n        else\n        {\n            offset = ((max_width + 32) << 4) + 16;\n        }\n\n        if (video->currVop)\n        {\n            if (video->currVop->yChan)\n            {\n                video->currVop->yChan -= offset;\n                M4VENC_FREE(video->currVop->yChan);\n            }\n            M4VENC_FREE(video->currVop);\n        }\n\n        if (video->nextBaseVop)\n        {\n            if (video->nextBaseVop->yChan)\n            {\n                video->nextBaseVop->yChan -= offset;\n                M4VENC_FREE(video->nextBaseVop->yChan);\n            }\n            M4VENC_FREE(video->nextBaseVop);\n        }\n\n        if (video->prevBaseVop)\n        {\n            if (video->prevBaseVop->yChan)\n            {\n                video->prevBaseVop->yChan -= offset;\n                M4VENC_FREE(video->prevBaseVop->yChan);\n            }\n            M4VENC_FREE(video->prevBaseVop);\n        }\n        if (video->prevEnhanceVop)\n        {\n            if (video->prevEnhanceVop->yChan)\n            {\n                video->prevEnhanceVop->yChan -= offset;\n                M4VENC_FREE(video->prevEnhanceVop->yChan);\n            }\n            M4VENC_FREE(video->prevEnhanceVop);\n        }\n\n        /* 04/09/01, for Vops in the use multipass processing */\n        for (idx = 0; idx < video->encParams->nLayers; idx++)\n        {\n            if (video->pMP[idx])\n            {\n                if (video->pMP[idx]->pRDSamples)\n                {\n                    for (i = 0; i < 30; i++)\n                    {\n                        if (video->pMP[idx]->pRDSamples[i])\n                            
M4VENC_FREE(video->pMP[idx]->pRDSamples[i]);\n                    }\n                    M4VENC_FREE(video->pMP[idx]->pRDSamples);\n                }\n\n                M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));\n                M4VENC_FREE(video->pMP[idx]);\n            }\n        }\n        /* //  End /////////////////////////////////////// */\n\n        if (video->vol)\n        {\n            for (idx = 0; idx < video->encParams->nLayers; idx++)\n            {\n                if (video->vol[idx])\n                {\n                    if (video->vol[idx]->stream)\n                        M4VENC_FREE(video->vol[idx]->stream);\n                    M4VENC_FREE(video->vol[idx]);\n                }\n            }\n            M4VENC_FREE(video->vol);\n        }\n\n        /***************************************************/\n        /* stop rate control parameters */\n        /***************************************************/\n\n        /* ANNEX L RATE CONTROL */\n        if (video->encParams->RC_Type != CONSTANT_Q)\n        {\n            RC_Cleanup(video->rc, video->encParams->nLayers);\n\n            for (idx = 0; idx < video->encParams->nLayers; idx++)\n            {\n                if (video->rc[idx])\n                    M4VENC_FREE(video->rc[idx]);\n            }\n        }\n\n        if (video->functionPointer) M4VENC_FREE(video->functionPointer);\n\n        /* If application has called PVCleanUpVideoEncoder then we deallocate */\n        /* If PVInitVideoEncoder class it, then we DO NOT deallocate */\n        if (video->encParams)\n        {\n            M4VENC_FREE(video->encParams);\n        }\n\n        M4VENC_FREE(video);\n        encoderControl->videoEncoderData = NULL; /* video */\n    }\n\n    encoderControl->videoEncoderInit = 0;\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetVolHeader()                                             */\n/*  Date     : 
7/17/2001,                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer)\n{\n    VideoEncData    *encData;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n\n    encData->currLayer = layer; /* Set Layer */\n    /*pv_status = */\n    EncodeVOS_Start(encCtrl); /* Encode VOL Header */\n\n    encData->encParams->GetVolHeader[layer] = 1; /* Set usage flag: Needed to support old method*/\n\n    /* Copy bitstream to buffer and set the size */\n\n    if (*size > encData->bitstream1->byteCount)\n    {\n        *size = encData->bitstream1->byteCount;\n        M4VENC_MEMCPY(volHeader, encData->bitstream1->bitstreamBuffer, *size);\n    }\n    else\n        return PV_FALSE;\n\n    /* Reset bitstream1 buffer parameters */\n    BitstreamEncReset(encData->bitstream1);\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetOverrunBuffer()                                         */\n/*  Purpose  : Get the overrun buffer `                                     */\n/*  In/out   :                                                              */\n/*  Return   : Pointer to overrun buffer.                                   
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl)\n{\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    Int currLayer = video->currLayer;\n    Vol *currVol = video->vol[currLayer];\n\n    if (currVol->stream->bitstreamBuffer != video->overrunBuffer) // not used\n    {\n        return NULL;\n    }\n\n    return video->overrunBuffer;\n}\n\n\n\n\n/* ======================================================================== */\n/*  Function : EncodeVideoFrame()                                           */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Encode video frame and return bitstream                      */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*  02.14.2001                                      */\n/*              Finishing new timestamp 32-bit input                        */\n/*              Applications need to take care of wrap-around               */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,\n                                        ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer)\n{\n    Bool status = PV_TRUE;\n    PV_STATUS pv_status;\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    VideoEncParams *encParams = video->encParams;\n    Vol *currVol;\n    Vop *tempForwRefVop = NULL;\n    Int tempRefSelCode = 0;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    Int width_16, height_16;\n    Int width, height;\n    Vop *temp;\n    Int encodeVop = 0;\n    void  PaddingEdge(Vop *padVop);\n    Int currLayer = -1;\n    //Int nLayers = encParams->nLayers;\n\n    ULong modTime = vid_in->timestamp;\n\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};\n    static Int rand_idx = 0;\n#endif\n\n    /*******************************************************/\n    /* Determine Next Vop to encode, if any, and nLayer    */\n    /*******************************************************/\n    //i = nLayers-1;\n\n    if (video->volInitialize[0]) /* first vol to code */\n    {\n    \t/* AGI RCS 08/12/09 */\n        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % video->encParams->TimeIncrementRes));\n    }\n\n    encodeVop = DetermineCodingLayer(video, nLayer, modTime);\n    currLayer = *nLayer;\n    if ((currLayer < 0) || (currLayer > encParams->nLayers - 1))\n        return 
PV_FALSE;\n\n    /******************************************/\n    /* If post-skipping still effective --- return */\n    /******************************************/\n\n    if (!encodeVop) /* skip enh layer, no base layer coded --- return */\n    {\n#ifdef _PRINT_STAT\n        printf(\"No frame coded. Continue to next frame.\");\n#endif\n        /* expected next code time, convert back to millisec */\n        *nextModTime = video->nextModTime;\n\n#ifdef ALLOW_VOP_NOT_CODED\n        if (video->vol[0]->shortVideoHeader) /* Short Video Header = 1 */\n        {\n            *size = 0;\n            *nLayer = -1;\n        }\n        else\n        {\n            *nLayer = 0;\n            EncodeVopNotCoded(video, bstream, size, modTime);\n            *size = video->vol[0]->stream->byteCount;\n        }\n#else\n        *size = 0;\n        *nLayer = -1;\n#endif\n        return status;\n    }\n\n\n//ENCODE_VOP_AGAIN:  /* 12/30/00 */\n\n    /**************************************************************/\n    /* Initialize Vol stream structure with application bitstream */\n    /**************************************************************/\n\n    currVol = video->vol[currLayer];\n    currVol->stream->bitstreamBuffer = bstream;\n    currVol->stream->bufferSize = *size;\n    BitstreamEncReset(currVol->stream);\n    BitstreamSetOverrunBuffer(currVol->stream, video->overrunBuffer, video->oBSize, video);\n\n    /***********************************************************/\n    /* Encode VOS and VOL Headers on first call for each layer */\n    /***********************************************************/\n\n    if (video->volInitialize[currLayer])\n    {\n        video->currVop->timeInc = 0;\n        video->prevBaseVop->timeInc = 0;\n        if (!video->encParams->GetVolHeader[currLayer])\n            pv_status = EncodeVOS_Start(encCtrl);\n    }\n\n    /***************************************************/\n    /* Copy Input Video Frame to Internal Video Buffer */\n    
/***************************************************/\n    /* Determine Width and Height of Vop Layer */\n\n    width = encParams->LayerWidth[currLayer];   /* Get input width */\n    height = encParams->LayerHeight[currLayer]; /* Get input height */\n    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */\n\n    width_16 = ((width + 15) / 16) * 16;            /* Round up to nearest multiple of 16 */\n    height_16 = ((height + 15) / 16) * 16;          /* Round up to nearest multiple of 16 */\n\n    video->input = vid_in;  /* point to the frame input */\n\n    /*//  End ////////////////////////////// */\n\n\n    /**************************************/\n    /* Determine VOP Type                 */\n    /* 6/2/2001, separate function      */\n    /**************************************/\n    DetermineVopType(video, currLayer);\n\n    /****************************/\n    /*    Initialize VOP        */\n    /****************************/\n    video->currVop->volID = currVol->volID;\n    video->currVop->width = width_16;\n    video->currVop->height = height_16;\n    if (video->encParams->H263_Enabled) /*  11/28/05 */\n    {\n        video->currVop->pitch = width_16;\n    }\n    else\n    {\n        video->currVop->pitch = width_16 + 32;\n    }\n    video->currVop->timeInc = currVol->timeIncrement;\n    video->currVop->vopCoded = 1;\n    video->currVop->roundingType = 0;\n    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;\n\n    if (currLayer == 0\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n            || random_val[rand_idx] || video->volInitialize[currLayer]\n#endif\n       )\n    {\n        tempForwRefVop = video->forwardRefVop; /* keep initial state */\n        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevBaseVop;\n        video->forwardRefVop->refSelectCode = 1;\n    }\n#ifdef RANDOM_REFSELCODE\n    else\n    {\n        tempForwRefVop = 
video->forwardRefVop; /* keep initial state */\n        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevEnhanceVop;\n        video->forwardRefVop->refSelectCode = 0;\n    }\n    rand_idx++;\n    rand_idx %= 30;\n#endif\n\n    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;\n    video->currVop->gobNumber = 0;\n    video->currVop->gobFrameID = video->currVop->predictionType;\n    /*\n     * AGI 08/12/2009\n     */\n    video->currVop->temporalRef = (Int)(modTime * video->FrameRate / video->encParams->TimeIncrementRes) % 256;\n\n    video->currVop->temporalInterval = 0;\n\n    if (video->currVop->predictionType == I_VOP)\n        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];\n    else\n        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];\n\n\n    /****************/\n    /* Encode Vop */\n    /****************/\n    video->slice_coding = 0;\n\n    pv_status = EncodeVop(video);\n#ifdef _PRINT_STAT\n    if (video->currVop->predictionType == I_VOP)\n        printf(\" I-VOP \");\n    else\n        printf(\" P-VOP (ref.%d)\", video->forwardRefVop->refSelectCode);\n#endif\n\n    /************************************/\n    /* Update Skip Next Frame           */\n    /************************************/\n    *nLayer = UpdateSkipNextFrame(video, nextModTime, size, pv_status);\n    if (*nLayer == -1) /* skip current frame */\n    {\n        /* make sure that pointers are restored to the previous state */\n        if (currLayer == 0)\n        {\n            video->forwardRefVop = tempForwRefVop; /* For P-Vop base only */\n            video->forwardRefVop->refSelectCode = tempRefSelCode;\n        }\n\n        return status;\n    }\n\n    /* If I-VOP was encoded, reset IntraPeriod */\n    if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))\n        video->nextEncIVop = encParams->IntraPeriod;\n\n    /* 
Set HintTrack Information */\n    if (currLayer != -1)\n    {\n        if (currVol->prevModuloTimeBase)\n            video->hintTrackInfo.MTB = 1;\n        else\n            video->hintTrackInfo.MTB = 0;\n        video->hintTrackInfo.LayerID = (UChar)currVol->volID;\n        video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;\n        video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;\n    }\n\n    /************************************************/\n    /* Determine nLayer and timeInc for next encode */\n    /* 12/27/00 always go by the highest layer*/\n    /************************************************/\n\n    /**********************************************************/\n    /* Copy Reconstructed Buffer to Output Video Frame Buffer */\n    /**********************************************************/\n    vid_out->yChan = video->currVop->yChan;\n    vid_out->uChan = video->currVop->uChan;\n    vid_out->vChan = video->currVop->vChan;\n    if (video->encParams->H263_Enabled)\n    {\n        vid_out->height = video->currVop->height; /* padded height */\n        vid_out->pitch = video->currVop->width; /* padded width */\n    }\n    else\n    {\n        vid_out->height = video->currVop->height + 32; /* padded height */\n        vid_out->pitch = video->currVop->width + 32; /* padded width */\n    }\n    //video_out->timestamp = video->modTime;\n    /* AGI RCS 08/12/09 */\n    vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * video->encParams->TimeIncrementRes) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);\n\n    /*// End /////////////////////// */\n\n    /***********************************/\n    /* Update Ouput bstream byte count */\n    /***********************************/\n\n    *size = currVol->stream->byteCount;\n\n    /****************************************/\n    /* Swap Vop Pointers for Base Layer     */\n    /****************************************/\n    if (currLayer == 0)\n    
{\n        temp = video->prevBaseVop;\n        video->prevBaseVop = video->currVop;\n        video->prevBaseVop->padded = 0; /* not padded */\n        video->currVop  = temp;\n        video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */\n        video->forwardRefVop->refSelectCode = 1;\n    }\n    else\n    {\n        temp = video->prevEnhanceVop;\n        video->prevEnhanceVop = video->currVop;\n        video->prevEnhanceVop->padded = 0; /* not padded */\n        video->currVop = temp;\n        video->forwardRefVop = video->prevEnhanceVop;\n        video->forwardRefVop->refSelectCode = 0;\n    }\n\n    /****************************************/\n    /* Modify the intialize flag at the end.*/\n    /****************************************/\n    if (video->volInitialize[currLayer])\n        video->volInitialize[currLayer] = 0;\n\n    return status;\n}\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : PVEncodeFrameSet()                                           */\n/*  Date     : 04/18/2000                                                   */\n/*  Purpose  : Enter a video frame and perform front-end time check plus ME */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer)\n{\n    Bool status = PV_TRUE;\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    VideoEncParams *encParams = video->encParams;\n    Vol *currVol;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    Int width_16, height_16;\n    Int width, height;\n    Int encodeVop = 0;\n    void  PaddingEdge(Vop *padVop);\n    Int currLayer = -1;\n    //Int nLayers = encParams->nLayers;\n\n    ULong   modTime = vid_in->timestamp;\n\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};\n    static Int rand_idx = 0;\n#endif\n    /*******************************************************/\n    /* Determine Next Vop to encode, if any, and nLayer    */\n    /*******************************************************/\n\n    video->modTime = modTime;\n\n    //i = nLayers-1;\n\n    if (video->volInitialize[0]) /* first vol to code */\n    {\n    \t/* AGI RCS 08/12/09 */\n        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % video->encParams->TimeIncrementRes));\n    }\n\n\n    encodeVop = DetermineCodingLayer(video, nLayer, modTime);\n\n    currLayer = *nLayer;\n\n    /******************************************/\n    /* If post-skipping still effective --- return */\n    /******************************************/\n\n    if (!encodeVop) /* skip enh layer, no base layer coded --- return */\n    {\n#ifdef _PRINT_STAT\n        printf(\"No frame coded. 
Continue to next frame.\");\n#endif\n        *nLayer = -1;\n\n        /* expected next code time, convert back to millisec */\n        *nextModTime = video->nextModTime;;\n        return status;\n    }\n\n    /**************************************************************/\n    /* Initialize Vol stream structure with application bitstream */\n    /**************************************************************/\n\n    currVol = video->vol[currLayer];\n    currVol->stream->bufferSize = 0;\n    BitstreamEncReset(currVol->stream);\n\n    /***********************************************************/\n    /* Encode VOS and VOL Headers on first call for each layer */\n    /***********************************************************/\n\n    if (video->volInitialize[currLayer])\n    {\n        video->currVop->timeInc = 0;\n        video->prevBaseVop->timeInc = 0;\n    }\n\n    /***************************************************/\n    /* Copy Input Video Frame to Internal Video Buffer */\n    /***************************************************/\n    /* Determine Width and Height of Vop Layer */\n\n    width = encParams->LayerWidth[currLayer];   /* Get input width */\n    height = encParams->LayerHeight[currLayer]; /* Get input height */\n    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */\n\n    width_16 = ((width + 15) / 16) * 16;            /* Round up to nearest multiple of 16 */\n    height_16 = ((height + 15) / 16) * 16;          /* Round up to nearest multiple of 16 */\n\n    video->input = vid_in;  /* point to the frame input */\n\n    /*//  End ////////////////////////////// */\n\n\n    /**************************************/\n    /* Determine VOP Type                 */\n    /* 6/2/2001, separate function      */\n    /**************************************/\n    DetermineVopType(video, currLayer);\n\n    /****************************/\n    /*    Initialize VOP        */\n    /****************************/\n    video->currVop->volID = currVol->volID;\n 
   video->currVop->width = width_16;\n    video->currVop->height = height_16;\n    if (video->encParams->H263_Enabled) /*  11/28/05 */\n    {\n        video->currVop->pitch = width_16;\n    }\n    else\n    {\n        video->currVop->pitch = width_16 + 32;\n    }\n    video->currVop->timeInc = currVol->timeIncrement;\n    video->currVop->vopCoded = 1;\n    video->currVop->roundingType = 0;\n    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;\n\n    if (currLayer == 0\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n            || random_val[rand_idx] || video->volInitialize[currLayer]\n#endif\n       )\n    {\n        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */\n        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevBaseVop;\n        video->forwardRefVop->refSelectCode = 1;\n    }\n#ifdef RANDOM_REFSELCODE\n    else\n    {\n        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */\n        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevEnhanceVop;\n        video->forwardRefVop->refSelectCode = 0;\n    }\n    rand_idx++;\n    rand_idx %= 30;\n#endif\n\n    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;\n    video->currVop->gobNumber = 0;\n    video->currVop->gobFrameID = video->currVop->predictionType;\n    /* AGI RCS 08/12/09 */\n    video->currVop->temporalRef = (Int)((modTime) * video->FrameRate / video->encParams->TimeIncrementRes) % 256;\n\n    video->currVop->temporalInterval = 0;\n\n    if (video->currVop->predictionType == I_VOP)\n        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];\n    else\n        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];\n\n    /****************/\n    /* Encode Vop   */\n    /****************/\n    
video->slice_coding = 1;\n\n    /*pv_status =*/\n    EncodeVop(video);\n\n#ifdef _PRINT_STAT\n    if (video->currVop->predictionType == I_VOP)\n        printf(\" I-VOP \");\n    else\n        printf(\" P-VOP (ref.%d)\", video->forwardRefVop->refSelectCode);\n#endif\n\n    /* Set HintTrack Information */\n    if (currVol->prevModuloTimeBase)\n        video->hintTrackInfo.MTB = 1;\n    else\n        video->hintTrackInfo.MTB = 0;\n\n    video->hintTrackInfo.LayerID = (UChar)currVol->volID;\n    video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;\n    video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;\n\n    return status;\n}\n#endif /* NO_SLICE_ENCODE */\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : PVEncodePacket()                                             */\n/*  Date     : 04/18/2002                                                   */\n/*  Purpose  : Encode one packet and return bitstream                       */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size,\n                                   Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime)\n{\n    PV_STATUS pv_status;\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    VideoEncParams *encParams = video->encParams;\n    Vol *currVol;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    Vop *temp;\n    void  PaddingEdge(Vop *padVop);\n    Int currLayer = video->currLayer;\n    Int pre_skip;\n    Int pre_size;\n    /**************************************************************/\n    /* Initialize Vol stream structure with application bitstream */\n    /**************************************************************/\n\n    currVol = video->vol[currLayer];\n    currVol->stream->bitstreamBuffer = bstream;\n    pre_size = currVol->stream->byteCount;\n    currVol->stream->bufferSize = pre_size + (*size);\n\n    /***********************************************************/\n    /* Encode VOS and VOL Headers on first call for each layer */\n    /***********************************************************/\n\n    if (video->volInitialize[currLayer])\n    {\n        if (!video->encParams->GetVolHeader[currLayer])\n            pv_status = EncodeVOS_Start(encCtrl);\n    }\n\n    /****************/\n    /* Encode Slice */\n    /****************/\n    pv_status = EncodeSlice(video);\n\n    *endofFrame = 0;\n\n    if (video->mbnum >= currVol->nTotalMB && !video->end_of_buf)\n    {\n        *endofFrame = 1;\n\n        /************************************/\n        /* Update Skip Next Frame           */\n        /************************************/\n        pre_skip = UpdateSkipNextFrame(video, 
nextModTime, size, pv_status); /* modified such that no pre-skipped */\n\n        if (pre_skip == -1) /* error */\n        {\n            *endofFrame = -1;\n            /* make sure that pointers are restored to the previous state */\n            if (currLayer == 0)\n            {\n                video->forwardRefVop = video->tempForwRefVop; /* For P-Vop base only */\n                video->forwardRefVop->refSelectCode = video->tempRefSelCode;\n            }\n\n            return pv_status;\n        }\n\n        /* If I-VOP was encoded, reset IntraPeriod */\n        if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))\n            video->nextEncIVop = encParams->IntraPeriod;\n\n        /**********************************************************/\n        /* Copy Reconstructed Buffer to Output Video Frame Buffer */\n        /**********************************************************/\n        vid_out->yChan = video->currVop->yChan;\n        vid_out->uChan = video->currVop->uChan;\n        vid_out->vChan = video->currVop->vChan;\n        if (video->encParams->H263_Enabled)\n        {\n            vid_out->height = video->currVop->height; /* padded height */\n            vid_out->pitch = video->currVop->width; /* padded width */\n        }\n        else\n        {\n            vid_out->height = video->currVop->height + 32; /* padded height */\n            vid_out->pitch = video->currVop->width + 32; /* padded width */\n        }\n        //vid_out->timestamp = video->modTime;\n        vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);\n\n        /*// End /////////////////////// */\n\n        /****************************************/\n        /* Swap Vop Pointers for Base Layer     */\n        /****************************************/\n\n        if (currLayer == 0)\n        {\n            temp = video->prevBaseVop;\n            
video->prevBaseVop = video->currVop;\n            video->prevBaseVop->padded = 0; /* not padded */\n            video->currVop = temp;\n            video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */\n            video->forwardRefVop->refSelectCode = 1;\n        }\n        else\n        {\n            temp = video->prevEnhanceVop;\n            video->prevEnhanceVop = video->currVop;\n            video->prevEnhanceVop->padded = 0; /* not padded */\n            video->currVop = temp;\n            video->forwardRefVop = video->prevEnhanceVop;\n            video->forwardRefVop->refSelectCode = 0;\n        }\n    }\n\n    /***********************************/\n    /* Update Ouput bstream byte count */\n    /***********************************/\n\n    *size = currVol->stream->byteCount - pre_size;\n\n    /****************************************/\n    /* Modify the intialize flag at the end.*/\n    /****************************************/\n    if (video->volInitialize[currLayer])\n        video->volInitialize[currLayer] = 0;\n\n    return pv_status;\n}\n#endif /* NO_SLICE_ENCODE */\n\n\n/* ======================================================================== */\n/*  Function : PVGetH263ProfileLevelID()                                    */\n/*  Date     : 02/05/2003                                                   */\n/*  Purpose  : Get H.263 Profile ID and level ID for profile 0              */\n/*  In/out   : Profile ID=0, levelID is what we want                        */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*  Note     : h263Level[8], rBR_bound[8], max_h263_framerate[2]            */\n/*             max_h263_width[2], max_h263_height[2] are global             */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID)\n{\n    VideoEncData *encData;\n    Int width, height;\n    float bitrate_r, framerate;\n\n\n    /* For this version, we only support H.263 profile 0 */\n    *profileID = 0;\n\n    *levelID = 0;\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    if (!encData->encParams->H263_Enabled) return PV_FALSE;\n\n\n    /* get image width, height, bitrate and framerate */\n    width     = encData->encParams->LayerWidth[0];\n    height    = encData->encParams->LayerHeight[0];\n    bitrate_r = (float)(encData->encParams->LayerBitRate[0]) / (float)64000.0;\n    framerate = encData->encParams->LayerFrameRate[0];\n    if (!width || !height || !(bitrate_r > 0 && framerate > 0)) return PV_FALSE;\n\n    /* This is the most frequent case : level 10 */\n    if (bitrate_r <= rBR_bound[1] && framerate <= max_h263_framerate[0] &&\n            (width <= max_h263_width[0] && height <= max_h263_height[0]))\n    {\n        *levelID = h263Level[1];\n        return PV_TRUE;\n    }\n    else if (bitrate_r > rBR_bound[4] ||\n             (width > max_h263_width[1] || height > max_h263_height[1]) ||\n             framerate > max_h263_framerate[1])    /* check the highest level 70 */\n    {\n        *levelID = h263Level[7];\n        return PV_TRUE;\n    }\n    else   /* search level 20, 30, 40 */\n    {\n\n        /* pick out level 20 */\n        if (bitrate_r <= 
rBR_bound[2] &&\n                ((width <= max_h263_width[0] && height <= max_h263_height[0] && framerate <= max_h263_framerate[1]) ||\n                 (width <= max_h263_width[1] && height <= max_h263_height[1] && framerate <= max_h263_framerate[0])))\n        {\n            *levelID = h263Level[2];\n            return PV_TRUE;\n        }\n        else   /* width, height and framerate are ok, now choose level 30 or 40 */\n        {\n            *levelID = (bitrate_r <= rBR_bound[3] ? h263Level[3] : h263Level[4]);\n            return PV_TRUE;\n        }\n    }\n}\n\n/* ======================================================================== */\n/*  Function : PVGetMPEG4ProfileLevelID()                                   */\n/*  Date     : 26/06/2008                                                   */\n/*  Purpose  : Get MPEG4 Level after initialized                            */\n/*  In/out   : profile_level according to interface                         */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer)\n{\n    VideoEncData* video;\n    Int i;\n\n    video = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (nLayer == 0)\n    {\n        for (i = 0; i < MAX_BASE_PROFILE + 1; i++)\n        {\n            if (video->encParams->ProfileLevel[0] == profile_level_code[i])\n            {\n                break;\n            }\n        }\n        *profile_level = i;\n    }\n    else\n    {\n        for (i = 0; i < MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE; i++)\n        {\n            if (video->encParams->ProfileLevel[1] == scalable_profile_level_code[i])\n            {\n                break;\n            }\n        }\n        *profile_level = i + MAX_BASE_PROFILE + 1;\n    }\n\n    return true;\n}\n\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateEncFrameRate                                         */\n/*  Date     : 04/08/2002                                                   */\n/*  Purpose  : Update target frame rates of the encoded base and enhance    */\n/*             layer(if any) while encoding operation is ongoing            */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate)\n{\n    VideoEncData    *encData;\n    Int i;// nTotalMB, mbPerSec;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Update the framerates for all the layers */\n    for (i = 0; i < encData->encParams->nLayers; i++)\n    {\n\n        /* New check: encoding framerate should be consistent with the given profile and level */\n        //nTotalMB = (((encData->encParams->LayerWidth[i]+15)/16)*16)*(((encData->encParams->LayerHeight[i]+15)/16)*16)/(16*16);\n        //mbPerSec = (Int)(nTotalMB * frameRate[i]);\n        //if(mbPerSec > encData->encParams->LayerMaxMbsPerSec[i]) return PV_FALSE;\n        if (frameRate[i] > encData->encParams->LayerMaxFrameRate[i]) return PV_FALSE; /* set by users or profile */\n\n        encData->encParams->LayerFrameRate[i] = frameRate[i];\n    }\n\n    RC_UpdateBXRCParams((void*) encData);\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateBitRate                                              */\n/*  Date     : 04/08/2002                                                   */\n/*  Purpose  : Update target bit rates of the encoded base and enhance      */\n/*             layer(if any) while encoding operation is ongoing            */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate)\n{\n    VideoEncData    *encData;\n    Int i;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Update the bitrates for all the layers */\n    for (i = 0; i < encData->encParams->nLayers; i++)\n    {\n        if (bitRate[i] > encData->encParams->LayerMaxBitRate[i]) /* set by users or profile */\n        {\n            return PV_FALSE;\n        }\n        encData->encParams->LayerBitRate[i] = bitRate[i];\n    }\n\n    RC_UpdateBXRCParams((void*) encData);\n    return PV_TRUE;\n\n}\n#endif\n#ifndef LIMITED_API\n/* ============================================================================ */\n/*  Function : PVUpdateVBVDelay()                                                   */\n/*  Date     : 4/23/2004                                                        */\n/*  Purpose  : Update VBV buffer size(in delay)                                 */\n/*  In/out   :                                                                  */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                        
*/\n/*  Modified :                                                                  */\n/*                                                                              */\n/* ============================================================================ */\n\nBool PVUpdateVBVDelay(VideoEncControls *encCtrl, float delay)\n{\n\n    VideoEncData    *encData;\n    Int total_bitrate, max_buffer_size;\n    int index;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Check whether the input delay is valid based on the given profile */\n    total_bitrate   = (encData->encParams->nLayers == 1 ? encData->encParams->LayerBitRate[0] :\n                       encData->encParams->LayerBitRate[1]);\n    index = encData->encParams->profile_table_index;\n    max_buffer_size = (encData->encParams->nLayers == 1 ? profile_level_max_VBV_size[index] :\n                       scalable_profile_level_max_VBV_size[index]);\n\n    if (total_bitrate*delay > (float)max_buffer_size)\n        return PV_FALSE;\n\n    encData->encParams->VBV_delay = delay;\n    return PV_TRUE;\n\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateIFrameInterval()                                         */\n/*  Date     : 04/10/2002                                                   */\n/*  Purpose  : updates the INTRA frame refresh interval while encoding      */\n/*             is ongoing                                                   */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    encData->encParams->IntraPeriod = aIFramePeriod;\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVSetNumIntraMBRefresh()                                     */\n/*  Date     : 08/05/2003                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool    PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n\n    encData->encParams->Refresh = numMB;\n\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVIFrameRequest()                                            */\n/*  Date     : 04/10/2002                                                   */\n/*  Purpose  : encodes the next base frame as an I-Vop                      */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    encData->nextEncIVop = 1;\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVGetEncMemoryUsage()                                        */\n/*  Date     : 10/17/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n    return encData->encParams->MemoryUsage;\n}\n#endif\n\n/* ======================================================================== */\n/*  Function : PVGetHintTrack()                                             */\n/*  Date     : 1/17/2001,                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n    info->MTB = encData->hintTrackInfo.MTB;\n    info->LayerID = encData->hintTrackInfo.LayerID;\n    info->CodeType = encData->hintTrackInfo.CodeType;\n    info->RefSelCode = encData->hintTrackInfo.RefSelCode;\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetMaxVideoFrameSize()                                     */\n/*  Date     : 7/17/2001,                                                   */\n/*  Purpose  : Function merely returns the maximum buffer size              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n\n\n    *maxVideoFrameSize = encData->encParams->BufferSize[0];\n\n    if (encData->encParams->nLayers == 2)\n        if (*maxVideoFrameSize < encData->encParams->BufferSize[1])\n            *maxVideoFrameSize = encData->encParams->BufferSize[1];\n    *maxVideoFrameSize >>= 3;   /* Convert to Bytes */\n\n    if (*maxVideoFrameSize <= 4000)\n        *maxVideoFrameSize = 4000;\n\n    return PV_TRUE;\n}\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVGetVBVSize()                                               */\n/*  Date     : 4/15/2002                                                    */\n/*  Purpose  : Function merely returns the maximum buffer size              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    *VBVSize = encData->encParams->BufferSize[0];\n    if (encData->encParams->nLayers == 2)\n        *VBVSize += encData->encParams->BufferSize[1];\n\n    return PV_TRUE;\n\n}\n#endif\n/* ======================================================================== */\n/*  Function : EncodeVOS_Start()                                            */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Encodes the VOS,VO, and VOL or Short Headers                 */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/*
 * EncodeVOS_Start: writes the VisualObjectSequence, VisualObject, VO and
 * (for M4V mode) VideoObjectLayer headers of the current layer into
 * video->bitstream1.  In short-video-header mode no headers are written and
 * the initial PV_SUCCESS status is returned immediately.
 * NOTE(review): 'status' is overwritten by every BitstreamPut* call, so only
 * the outcome of the last write is returned; intermediate failures are not
 * individually checked.
 */
PV_STATUS EncodeVOS_Start(VideoEncControls *encoderControl)
{

    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    Vol         *currVol = video->vol[video->currLayer];
    PV_STATUS status = PV_SUCCESS;
    //int profile_level=0x01;
    BitstreamEncVideo *stream = video->bitstream1;
    int i, j;

    /********************************/
    /* Check for short_video_header */
    /********************************/
    if (currVol->shortVideoHeader == 1)
        return status;
    else
    {
        /* Short Video Header or M4V */

        /**************************/
        /* VisualObjectSequence ()*/
        /**************************/
        status = BitstreamPutGT16Bits(stream, 32, SESSION_START_CODE);
        /*  Determine profile_level */
        status = BitstreamPutBits(stream, 8, video->encParams->ProfileLevel[video->currLayer]);

        /******************/
        /* VisualObject() */
        /******************/

        status = BitstreamPutGT16Bits(stream, 32, VISUAL_OBJECT_START_CODE);
        status = BitstreamPut1Bits(stream, 0x00); /* visual object identifier */
        status = BitstreamPutBits(stream, 4, 0x01); /* visual object Type == "video ID" */
        status = BitstreamPut1Bits(stream, 0x00); /* no video signal type */

        /*temp   = */
        BitstreamMpeg4ByteAlignStuffing(stream);


        status = BitstreamPutGT16Bits(stream, 27, VO_START_CODE);/* byte align: should be 2 bits */
        status = BitstreamPutBits(stream, 5, 0x00);/*  Video ID = 0  */



        /**********************/
        /* VideoObjectLayer() */
        /**********************/
        if (currVol->shortVideoHeader == 0)
        { /* M4V  else Short Video Header */
            status = BitstreamPutGT16Bits(stream, VOL_START_CODE_LENGTH, VOL_START_CODE);
            status = BitstreamPutBits(stream, 4, currVol->volID);/*  video_object_layer_id */
            status = BitstreamPut1Bits(stream, 0x00);/*  Random Access = 0  */

            if (video->currLayer == 0)
                status = BitstreamPutBits(stream, 8, 0x01);/* Video Object Type Indication = 1  ... Simple Object Type */
            else
                status = BitstreamPutBits(stream, 8, 0x02);/* Video Object Type Indication = 2  ... Simple Scalable Object Type */

            status = BitstreamPut1Bits(stream, 0x00);/*  is_object_layer_identifer = 0 */


            status = BitstreamPutBits(stream, 4, 0x01); /* aspect_ratio_info = 1 ... 1:1(Square) */
            status = BitstreamPut1Bits(stream, 0x00);/* vol_control_parameters = 0 */
            status = BitstreamPutBits(stream, 2, 0x00);/* video_object_layer_shape = 00 ... rectangular */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 16, currVol->timeIncrementResolution);/* vop_time_increment_resolution */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPut1Bits(stream, currVol->fixedVopRate);/* fixed_vop_rate = 0 */

            /* For Rectangular VO layer shape */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->width);/* video_object_layer_width */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->height);/* video_object_layer_height */
            status = BitstreamPut1Bits(stream, 0x01);/*marker bit */

            status = BitstreamPut1Bits(stream, 0x00);/*interlaced = 0 */
            status = BitstreamPut1Bits(stream, 0x01);/* obmc_disable = 1 */
            status = BitstreamPut1Bits(stream, 0x00);/* sprite_enable = 0 */
            status = BitstreamPut1Bits(stream, 0x00);/* not_8_bit = 0 */
            status = BitstreamPut1Bits(stream, currVol->quantType);/*   quant_type */

            if (currVol->quantType)
            {
                status = BitstreamPut1Bits(stream, currVol->loadIntraQuantMat); /* Intra quant matrix */
                if (currVol->loadIntraQuantMat)
                {
                    /* find the last zig-zag position after which all entries
                       repeat; matrix is transmitted only up to that point,
                       terminated by a 0 byte when shorter than 64 entries */
                    for (j = 63; j >= 1; j--)
                        if (currVol->iqmat[*(zigzag_i+j)] != currVol->iqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->iqmat[*(zigzag_i+j)] == currVol->iqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->iqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    /* no custom matrix: fall back to the default intra matrix */
                    for (j = 0; j < 64; j++)
                        currVol->iqmat[j] = mpeg_iqmat_def[j];

                }
                status = BitstreamPut1Bits(stream, currVol->loadNonIntraQuantMat); /* Non-Intra quant matrix */
                if (currVol->loadNonIntraQuantMat)
                {
                    /* same run-length scheme as the intra matrix above */
                    for (j = 63; j >= 1; j--)
                        if (currVol->niqmat[*(zigzag_i+j)] != currVol->niqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->niqmat[*(zigzag_i+j)] == currVol->niqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->niqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    for (j = 0; j < 64; j++)
                        currVol->niqmat[j] = mpeg_nqmat_def[j];
                }
            }

            status = BitstreamPut1Bits(stream, 0x01);   /* complexity_estimation_disable = 1 */
            status = BitstreamPut1Bits(stream, currVol->ResyncMarkerDisable);/* Resync_marker_disable */
            status = BitstreamPut1Bits(stream, currVol->dataPartitioning);/* Data partitioned */

            if (currVol->dataPartitioning)
                status = BitstreamPut1Bits(stream, currVol->useReverseVLC); /* Reversible_vlc */


            if (currVol->scalability) /* Scalability*/
            {

                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 1 */
                status = BitstreamPut1Bits(stream, currVol->scalType);/* hierarchy _type ... Spatial= 0 and Temporal = 1 */
                status = BitstreamPutBits(stream, 4, currVol->refVolID);/* ref_layer_id  */
                status = BitstreamPut1Bits(stream, currVol->refSampDir);/* ref_layer_sampling_direc*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_n);/*hor_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_m);/*hor_sampling_factor_m*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_n);/*vert_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_m);/*vert_sampling_factor_m*/
                status = BitstreamPut1Bits(stream, currVol->enhancementType);/* enhancement_type*/
            }
            else /* No Scalability */
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 0 */

            /*temp = */
            BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align Headers for VOP */
        }
    }

    return status;
}

/* ======================================================================== */
/*  Function : VOS_End()                          
                          */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Visual Object Sequence End                                   */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nPV_STATUS VOS_End(VideoEncControls *encoderControl)\n{\n    PV_STATUS status = PV_SUCCESS;\n    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;\n    Vol         *currVol = video->vol[video->currLayer];\n    BitstreamEncVideo *stream = currVol->stream;\n\n\n    status = BitstreamPutBits(stream, SESSION_END_CODE, 32);\n\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : DetermineCodingLayer                                         */\n/*  Date     : 06/02/2001                                                   */\n/*  Purpose  : Find layer to code based on current mod time, assuming that\n               it's time to encode enhanced layer.                          */\n/*  In/out   :                                                              */\n/*  Return   : Number of layer to code.                                     
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

/*
 * DetermineCodingLayer: decides whether a VOP should be encoded at time
 * 'modTime' and, if so, which layer to code (written to *nLayer and
 * video->currLayer).  Returns 1 when a VOP should be encoded, 0 otherwise.
 * As side effects it updates per-layer timing state (relLayerCodeTime,
 * timeIncrement, moduloTimeBase, prevFrameNum, nextModTime) and notifies
 * rate control of extra dropped frames via RC_UpdateBuffer.
 */
Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime)
{
    Vol **vol = video->vol;
    VideoEncParams *encParams = video->encParams;
    Int numLayers = encParams->nLayers;
    UInt modTimeRef = video->modTimeRef;
    float *LayerFrameRate = encParams->LayerFrameRate;
    UInt frameNum[4], frameTick;
    ULong frameModTime, nextFrmModTime;
#ifdef REDUCE_FRAME_VARIANCE    /* To limit how close 2 frames can be */
    float frameInterval;
#endif
    float srcFrameInterval;
    Int frameInc;
    Int i, extra_skip;
    Int encodeVop = 0;

    /* start with the highest (enhancement) layer */
    i = numLayers - 1;

    if (modTime - video->nextModTime > ((ULong)(-1)) >> 1) /* next time wrapped around */
        return 0; /* not time to code it yet */

    video->relLayerCodeTime[i] -= 1000;
    video->nextEncIVop--;  /* number of Vops in highest layer resolution. */
    video->numVopsInGOP++;

    /* from this point frameModTime and nextFrmModTime are internal */

    /* frame number at the layer's target rate, rounded to the nearest frame */
    frameNum[i] = (UInt)((modTime - modTimeRef) * LayerFrameRate[i] + 500) / 1000;
    if (video->volInitialize[i])
    {
        video->prevFrameNum[i] = frameNum[i] - 1;
    }
    else if (frameNum[i] <= video->prevFrameNum[i])
    {
        return 0; /* do not encode this frame */
    }

    /**** this part computes expected next frame *******/
    frameModTime = (ULong)(((frameNum[i] * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */
    nextFrmModTime = (ULong)((((frameNum[i] + 1) * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */

    srcFrameInterval = 1000 / video->FrameRate;

    video->nextModTime = nextFrmModTime - (ULong)(srcFrameInterval / 2.) - 1; /* between current and next frame */

#ifdef REDUCE_FRAME_VARIANCE    /* To limit how close 2 frames can be */
    frameInterval = 1000 / LayerFrameRate[i]; /* next rec. time */
    delta = (Int)(frameInterval / 4); /* empirical number */
    if (video->nextModTime - modTime  < (ULong)delta) /* need to move nextModTime further. */
    {
        video->nextModTime += ((delta - video->nextModTime + modTime)); /* empirical formula  */
    }
#endif
    /****************************************************/

    /* map frame no.to tick from modTimeRef */
    /*frameTick = (frameNum[i]*vol[i]->timeIncrementResolution) ;
    frameTick = (UInt)((frameTick + (encParams->LayerFrameRate[i]/2))/encParams->LayerFrameRate[i]);*/
    /*  11/16/01, change frameTick to be the closest tick from the actual modTime */
    /*  12/12/02, add (double) to prevent large number wrap-around */
    frameTick = (Int)(((double)(modTime - modTimeRef) * vol[i]->timeIncrementResolution + 500) / 1000);

    /* find timeIncrement to be put in the bitstream */
    /* refTick is second boundary reference. */
    vol[i]->timeIncrement = frameTick - video->refTick[i];


    /* normalize timeIncrement into [0, timeIncrementResolution), counting
       the number of whole seconds crossed in moduloTimeBase */
    vol[i]->moduloTimeBase = 0;
    while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
    {
        vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
        vol[i]->moduloTimeBase++;
        /* do not update refTick and modTimeRef yet, do it after encoding!! */
    }

    if (video->relLayerCodeTime[i] <= 0)    /* no skipping */
    {
        encodeVop = 1;
        video->currLayer = *nLayer = i;
        video->relLayerCodeTime[i] += 1000;

        /* takes care of more dropped frame than expected */
        extra_skip = -1;
        frameInc = (frameNum[i] - video->prevFrameNum[i]);
        extra_skip += frameInc;

        if (extra_skip > 0)
        {   /* update rc->Nr, rc->B, (rc->Rr)*/
            video->nextEncIVop -= extra_skip;
            video->numVopsInGOP += extra_skip;
            if (encParams->RC_Type != CONSTANT_Q)
            {
                RC_UpdateBuffer(video, i, extra_skip);
            }
        }

    }
    /* update frame no. */
    video->prevFrameNum[i] = frameNum[i];

    /* go through all lower layer */
    for (i = (numLayers - 2); i >= 0; i--)
    {

        video->relLayerCodeTime[i] -= 1000;

        /* find timeIncrement to be put in the bitstream */
        vol[i]->timeIncrement = frameTick - video->refTick[i];

        if (video->relLayerCodeTime[i] <= 0) /* time to encode base */
        {
            /* 12/27/00 */
            /* a lower layer takes precedence: overwrite currLayer/*nLayer */
            encodeVop = 1;
            video->currLayer = *nLayer = i;
            video->relLayerCodeTime[i] +=
                (Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]);

            vol[i]->moduloTimeBase = 0;
            while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
            {
                vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
                vol[i]->moduloTimeBase++;
                /* do not update refTick and modTimeRef yet, do it after encoding!! */
            }

            /* takes care of more dropped frame than expected */
            frameNum[i] = (UInt)((frameModTime - modTimeRef) * encParams->LayerFrameRate[i] + 500) / 1000;
            if (video->volInitialize[i])
                video->prevFrameNum[i] = frameNum[i] - 1;

            extra_skip = -1;
            frameInc = (frameNum[i] - video->prevFrameNum[i]);
            extra_skip += frameInc;

            if (extra_skip > 0)
            {   /* update rc->Nr, rc->B, (rc->Rr)*/
                if (encParams->RC_Type != CONSTANT_Q)
                {
                    RC_UpdateBuffer(video, i, extra_skip);
                }
            }
            /* update frame no. */
            video->prevFrameNum[i] = frameNum[i];
        }
    }

#ifdef _PRINT_STAT
    if (encodeVop)
        printf(" TI: %d ", vol[*nLayer]->timeIncrement);
#endif

    return encodeVop;
}

/* ======================================================================== */
/*  Function : DetermineVopType                                             */
/*  Date     : 06/02/2001                                                   */
/*  Purpose  : The name says it all.                                        */
/*  In/out   :                                                              */
/*  Return   : void .                                                       
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

/*
 * DetermineVopType: chooses I-VOP or P-VOP for video->currVop based on
 * encParams->IntraPeriod (0 = intra-only base layer, -1 = first frame intra
 * then all P, >0 = periodic intra) and resets the GOP counters
 * (numVopsInGOP, nextEncIVop) when an I-VOP is selected on the base layer.
 * NOTE(review): in the IntraPeriod != 0 branches the current value of
 * video->currVop->predictionType is read back after possibly being forced
 * to P_VOP, so statement order here is significant.
 */
void DetermineVopType(VideoEncData *video, Int currLayer)
{
    VideoEncParams *encParams = video->encParams;
//  Vol *currVol = video->vol[currLayer];

    if (encParams->IntraPeriod == 0) /* I-VOPs only */
    {
        if (video->currLayer > 0)
            video->currVop->predictionType = P_VOP;
        else
        {
            video->currVop->predictionType = I_VOP;
            if (video->numVopsInGOP >= 132)
                video->numVopsInGOP = 0;
        }
    }
    else if (encParams->IntraPeriod == -1)  /* IPPPPP... */
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (video->currLayer == 0)
        {
            if (/*video->numVopsInGOP>=132 || */video->volInitialize[currLayer])
            {
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0; /* force INTRA update every 132 base frames*/
                video->nextEncIVop = 1;
            }
            else if (video->nextEncIVop == 0 || video->currVop->predictionType == I_VOP)
            {
                video->numVopsInGOP = 0;
                video->nextEncIVop = 1;
            }
        }
    }
    else   /* IntraPeriod>0 : IPPPPPIPPPPPI... */
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (currLayer == 0)
        {
            /* intra refresh due (nextEncIVop exhausted) or RC already chose I */
            if (video->nextEncIVop <= 0 || video->currVop->predictionType == I_VOP)
            {
                video->nextEncIVop = encParams->IntraPeriod;
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0;
            }
        }
    }

    return ;
}

/* ======================================================================== */
/*  Function : UpdateSkipNextFrame                                          */
/*  Date     : 06/02/2001                                                   */
/*  Purpose  : From rate control frame skipping decision, update timing
                related parameters.                                         */
/*  In/out   :                                                              */
/*  Return   : Current coded layer.                                         
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

/*
 * UpdateSkipNextFrame: applies the rate-control skip decision after a frame
 * has been (or was about to be) encoded.  Returns -1 and zeroes *size when
 * the current frame must be dropped; otherwise returns the coded layer.
 * Also advances refTick for the coded layer, periodically re-bases
 * modTimeRef/refTick on the base layer to prevent overflow, and reports the
 * next encoding time through *modTime.
 */
Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status)
{
    Int currLayer = video->currLayer;
    Int nLayer = currLayer;
    VideoEncParams *encParams = video->encParams;
    Int numLayers = encParams->nLayers;
    Vol *currVol = video->vol[currLayer];
    Vol **vol = video->vol;
    Int num_skip, extra_skip;
    Int i;
    UInt newRefTick, deltaModTime;
    UInt temp;

    if (encParams->RC_Type != CONSTANT_Q)
    {
        if (video->volInitialize[0] && currLayer == 0)  /* always encode the first frame */
        {
            RC_ResetSkipNextFrame(video, currLayer);
            //return currLayer;  09/15/05
        }
        else
        {
            if (RC_GetSkipNextFrame(video, currLayer) < 0 || status == PV_END_OF_BUF)   /* Skip Current Frame */
            {

#ifdef _PRINT_STAT
                printf("Skip current frame");
#endif
                /* undo the moduloTimeBase advance made for this frame */
                currVol->moduloTimeBase = currVol->prevModuloTimeBase;

                /*********************/
                /* prepare to return */
                /*********************/
                *size = 0;  /* Set Bitstream buffer to zero */

                /* Determine nLayer and modTime for next encode */

                *modTime = video->nextModTime;
                nLayer = -1;

                return nLayer; /* return immediately without updating RefTick & modTimeRef */
                /* If I-VOP was attempted, then ensure next base is I-VOP */
                /*if((encParams->IntraPeriod>0) && (video->currVop->predictionType == I_VOP))
                video->nextEncIVop = 0; commented out by 06/05/01 */

            }
            else if ((num_skip = RC_GetSkipNextFrame(video, currLayer)) > 0)
            {

#ifdef _PRINT_STAT
                printf("Skip next %d frames", num_skip);
#endif
                /* to keep the Nr of enh layer the same */
                /* adjust relLayerCodeTime only, do not adjust layerCodeTime[numLayers-1] */
                extra_skip = 0;
                for (i = 0; i < currLayer; i++)
                {
                    if (video->relLayerCodeTime[i] <= 1000)
                    {
                        extra_skip = 1;
                        break;
                    }
                }

                for (i = currLayer; i < numLayers; i++)
                {
                    video->relLayerCodeTime[i] += (num_skip + extra_skip) *
                                                  ((Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]));
                }
            }
        }/* first frame */
    }
    /*****  current frame is encoded, now update refTick ******/

    video->refTick[currLayer] += vol[currLayer]->prevModuloTimeBase * vol[currLayer]->timeIncrementResolution;

    /* Reset layerCodeTime every I-VOP to prevent overflow */
    if (currLayer == 0)
    {
        /*  12/12/02, fix for weird targer frame rate of 9.99 fps or 3.33 fps */
        if (((encParams->IntraPeriod != 0) /*&& (video->currVop->predictionType==I_VOP)*/) ||
                ((encParams->IntraPeriod == 0) && (video->numVopsInGOP == 0)))
        {
            /* smallest refTick over all layers bounds how much can be rebased */
            newRefTick = video->refTick[0];

            for (i = 1; i < numLayers; i++)
            {
                if (video->refTick[i] < newRefTick)
                    newRefTick = video->refTick[i];
            }

            /* check to make sure that the update is integer multiple of frame number */
            /* how many msec elapsed from last modTimeRef */
            deltaModTime = (newRefTick / vol[0]->timeIncrementResolution) * 1000;

            for (i = numLayers - 1; i >= 0; i--)
            {
                temp = (UInt)(deltaModTime * encParams->LayerFrameRate[i]); /* 12/12/02 */
                if (temp % 1000)
                    newRefTick = 0;

            }
            if (newRefTick > 0)
            {
                video->modTimeRef += deltaModTime;
                for (i = numLayers - 1; i >= 0; i--)
                {
                    video->prevFrameNum[i] -= (UInt)(deltaModTime * encParams->LayerFrameRate[i]) / 1000;
                    video->refTick[i] -= newRefTick;
                }
            }
        }
    }

    *modTime =  video->nextModTime;

    return nLayer;
}


#ifndef ORIGINAL_VERSION

/* ======================================================================== */
/*  Function : SetProfile_BufferSize                                        */
/*  Date     : 04/08/2002                                                   */
/*  Purpose  : Set profile and video buffer size, copied from Jim's code    */
/*             in PVInitVideoEncoder(.), since we have different places     */
/*             to reset profile and video buffer size                       */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized)
{
    Int i, j, start, end;
//  Int BaseMBsPerSec = 0, EnhMBsPerSec = 0;
    Int nTotalMB = 0;
    Int idx, temp_w, temp_h, max = 0, max_width, max_height;

    Int nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    Int total_bitrate = 0, base_bitrate;
    Int total_packet_size = 0, base_packet_size;
    Int 
total_MBsPerSec = 0, base_MBsPerSec;\n    Int total_VBV_size = 0, base_VBV_size, enhance_VBV_size = 0;\n    float total_framerate, base_framerate;\n    float upper_bound_ratio;\n    Int bFound = 0;\n    Int k = 0, width16, height16, index;\n    Int lowest_level;\n\n#define MIN_BUFF    16000 /* 16k minimum buffer size */\n#define BUFF_CONST  2.0    /* 2000ms */\n#define UPPER_BOUND_RATIO 8.54 /* upper_bound = 1.4*(1.1+bound/10)*bitrate/framerate */\n\n#define QCIF_WIDTH  176\n#define QCIF_HEIGHT 144\n\n    index = video->encParams->profile_table_index;\n\n    /* Calculate \"nTotalMB\" */\n    /* Find the maximum width*height for memory allocation of the VOPs */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        temp_w = video->encParams->LayerWidth[idx];\n        temp_h = video->encParams->LayerHeight[idx];\n\n        if ((temp_w*temp_h) > max)\n        {\n            max = temp_w * temp_h;\n            max_width = temp_w;\n            max_height = temp_h;\n            nTotalMB = ((max_width + 15) >> 4) * ((max_height + 15) >> 4);\n        }\n    }\n    upper_bound_ratio = (video->encParams->RC_Type == CBR_LOWDELAY ? 
(float)5.0 : (float)UPPER_BOUND_RATIO);\n\n\n    /* Get the basic information: bitrate, packet_size, MBs/s and VBV_size */\n    base_bitrate        = video->encParams->LayerBitRate[0];\n    if (video->encParams->LayerMaxBitRate[0] != 0) /* video->encParams->LayerMaxBitRate[0] == 0 means it has not been set */\n    {\n        base_bitrate    = PV_MAX(base_bitrate, video->encParams->LayerMaxBitRate[0]);\n    }\n    else /* if the max is not set, set it to the specified profile/level */\n    {\n        video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[index];\n    }\n\n    base_framerate      = video->encParams->LayerFrameRate[0];\n    if (video->encParams->LayerMaxFrameRate[0] != 0)\n    {\n        base_framerate  = PV_MAX(base_framerate, video->encParams->LayerMaxFrameRate[0]);\n    }\n    else /* if the max is not set, set it to the specified profile/level */\n    {\n        video->encParams->LayerMaxFrameRate[0] = (float)profile_level_max_mbsPerSec[index] / nTotalMB;\n    }\n\n    base_packet_size    = video->encParams->ResyncPacketsize;\n    base_MBsPerSec      = (Int)(base_framerate * nTotalMB);\n    base_VBV_size       = PV_MAX((Int)(base_bitrate * delay),\n                                 (Int)(upper_bound_ratio * base_bitrate / base_framerate));\n    base_VBV_size       = PV_MAX(base_VBV_size, MIN_BUFF);\n\n    /* if the buffer is larger than maximum buffer size, we'll clip it */\n    if (base_VBV_size > profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5])\n        base_VBV_size = profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5];\n\n    /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */\n    if (nLayers == 1 && base_VBV_size > profile_level_max_VBV_size[index])\n        return FALSE;\n\n\n    if (nLayers == 2) /* check both enhanced and base layer */\n    {\n\n        total_bitrate       = video->encParams->LayerBitRate[1];\n        if (video->encParams->LayerMaxBitRate[1] != 0)\n        {\n            
total_bitrate   = PV_MIN(total_bitrate, video->encParams->LayerMaxBitRate[1]);\n        }\n        else /* if the max is not set, set it to the specified profile/level */\n        {\n            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[index];\n        }\n\n        total_framerate     = video->encParams->LayerFrameRate[1];\n        if (video->encParams->LayerMaxFrameRate[1] != 0)\n        {\n            total_framerate     = PV_MIN(total_framerate, video->encParams->LayerMaxFrameRate[1]);\n        }\n        else /* if the max is not set, set it to the specified profile/level */\n        {\n            video->encParams->LayerMaxFrameRate[1] = (float)scalable_profile_level_max_mbsPerSec[index] / nTotalMB;\n        }\n\n        total_packet_size   = video->encParams->ResyncPacketsize;\n        total_MBsPerSec     = (Int)(total_framerate * nTotalMB);\n\n        enhance_VBV_size    = PV_MAX((Int)((total_bitrate - base_bitrate) * delay),\n                                     (Int)(upper_bound_ratio * (total_bitrate - base_bitrate) / (total_framerate - base_framerate)));\n        enhance_VBV_size    = PV_MAX(enhance_VBV_size, MIN_BUFF);\n\n        total_VBV_size      = base_VBV_size + enhance_VBV_size;\n\n        /* if the buffer is larger than maximum buffer size, we'll clip it */\n        if (total_VBV_size > scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1])\n        {\n            total_VBV_size = scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1];\n            enhance_VBV_size = total_VBV_size - base_VBV_size;\n        }\n\n        /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */\n        if (total_VBV_size > scalable_profile_level_max_VBV_size[index])\n            return FALSE;\n    }\n\n\n    if (!bInitialized) /* Has been initialized --> profile @ level has been figured out! 
*/\n    {\n        video->encParams->BufferSize[0] = base_VBV_size;\n        if (nLayers > 1)\n            video->encParams->BufferSize[1] = enhance_VBV_size;\n\n        return PV_TRUE;\n    }\n\n\n    /* Profile @ level determination */\n    if (nLayers == 1)\n    {\n        /* check other parameters */\n        /* BASE ONLY : Simple Profile(SP) Or Core Profile(CP) */\n        if (base_bitrate     > profile_level_max_bitrate[index]     ||\n                base_packet_size > profile_level_max_packet_size[index] ||\n                base_MBsPerSec   > profile_level_max_mbsPerSec[index]   ||\n                base_VBV_size    > profile_level_max_VBV_size[index])\n\n            return PV_FALSE; /* Beyond the bound of Core Profile @ Level2 */\n\n        /* For H263/Short header, determine k*16384 */\n        /* This part only applies to Short header mode, but not H.263 */\n        width16  = ((video->encParams->LayerWidth[0] + 15) >> 4) << 4;\n        height16 = ((video->encParams->LayerHeight[0] + 15) >> 4) << 4;\n        if (video->encParams->H263_Enabled)\n        {\n            k = 4;\n            if (width16  == 2*QCIF_WIDTH && height16 == 2*QCIF_HEIGHT)  /* CIF */\n                k = 16;\n\n            else if (width16  == 4*QCIF_WIDTH && height16 == 4*QCIF_HEIGHT)  /* 4CIF */\n                k = 32;\n\n            else if (width16  == 8*QCIF_WIDTH && height16 == 8*QCIF_HEIGHT)  /* 16CIF */\n                k = 64;\n\n            video->encParams->maxFrameSize  = k * 16384;\n\n            /* Make sure the buffer size is limited to the top profile and level: the Core profile and level 2 */\n            /* AGI RCS 08/12/09 */\n            if (base_VBV_size > (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]))\n                base_VBV_size = (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]);\n\n            if 
(base_VBV_size > (Int)(video->encParams->maxFrameSize + video->encParams->VBV_delay*(float)profile_level_max_bitrate[index]))\n                return PV_FALSE;\n        }\n\n        /* Search the appropriate profile@level index */\n        if (!video->encParams->H263_Enabled &&\n                (video->encParams->IntraDCVlcThr != 0 || video->encParams->SearchRange > 16))\n        {\n            lowest_level = SIMPLE_PROFILE_LEVEL1; /* cannot allow SPL0 */\n        }\n        else\n        {\n            lowest_level = SIMPLE_PROFILE_LEVEL0; /* SPL0 */\n        }\n\n        for (i = lowest_level; i <= index; i++)\n        {\n            /* Since CPL1 is smaller than SPL4A, SPL5, this search favors Simple Profile.  */\n\n            if (base_bitrate     <= profile_level_max_bitrate[i]     &&\n                    base_packet_size <= profile_level_max_packet_size[i] &&\n                    base_MBsPerSec   <= profile_level_max_mbsPerSec[i]   &&\n                    /* AGI  RCS 08/12/09 */\n                    base_VBV_size    <= (video->encParams->H263_Enabled ? (Int)(k*16384 + video->encParams->VBV_delay*(float)profile_level_max_bitrate[i]) :\n\t\t\t\t\t\t\t\t\t\tprofile_level_max_VBV_size[i]))\n                    break;\n        }\n        if (i > index) return PV_FALSE; /* Nothing found!! 
*/\n\n        /* Found out the actual profile @ level : index \"i\" */\n        if (i == 0)\n        {\n            /* For Simple Profile @ Level 0, we need to do one more check: image size <= QCIF */\n            if (width16 > QCIF_WIDTH || height16 > QCIF_HEIGHT)\n                i = 1; /* image size > QCIF, then set SP level1 */\n        }\n\n        video->encParams->ProfileLevel[0] = profile_level_code[i];\n        video->encParams->BufferSize[0]   = base_VBV_size;\n\n        if (video->encParams->LayerMaxBitRate[0] == 0)\n            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[i];\n\n        if (video->encParams->LayerMaxFrameRate[0] == 0)\n            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[i] / nTotalMB);\n\n        /* For H263/Short header, one special constraint for VBV buffer size */\n        if (video->encParams->H263_Enabled)\n        \t/* AGI RCS 08/12/09 */\n        \tvideo->encParams->BufferSize[0] = (Int)(k * 16384 + 4 * (float)profile_level_max_bitrate[i] * (video->encParams->TimeIncrementRes / video->FrameRate) / video->encParams->TimeIncrementRes);\n    }\n    else\n    {\n        /* SCALABALE MODE: Simple Scalable Profile(SSP) Or Core Scalable Profile(CSP) */\n\n        if (total_bitrate       > scalable_profile_level_max_bitrate[index]     ||\n                total_packet_size   > scalable_profile_level_max_packet_size[index] ||\n                total_MBsPerSec     > scalable_profile_level_max_mbsPerSec[index]   ||\n                total_VBV_size      > scalable_profile_level_max_VBV_size[index])\n\n            return PV_FALSE; /* Beyond given profile and level */\n\n        /* One-time check: Simple Scalable Profile or Core Scalable Profile */\n        if (total_bitrate       <= scalable_profile_level_max_bitrate[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]        &&\n                total_packet_size   <= 
scalable_profile_level_max_packet_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]    &&\n                total_MBsPerSec     <= scalable_profile_level_max_mbsPerSec[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]      &&\n                total_VBV_size      <= scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1])\n\n        {\n            start = 0;\n            end = index;\n        }\n\n        else\n        {\n            start = CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1;\n            end = index;\n        }\n\n\n        /* Search the scalable profile */\n        for (i = start; i <= end; i++)\n        {\n            if (total_bitrate       <= scalable_profile_level_max_bitrate[i]     &&\n                    total_packet_size   <= scalable_profile_level_max_packet_size[i] &&\n                    total_MBsPerSec     <= scalable_profile_level_max_mbsPerSec[i]   &&\n                    total_VBV_size      <= scalable_profile_level_max_VBV_size[i])\n\n                break;\n        }\n        if (i > end) return PV_FALSE;\n\n        /* Search for matching base profile */\n        if (i == 0)\n        {\n            j = 0;\n            bFound = 1;\n        }\n        else        bFound = 0;\n\n        if (i >= CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1)\n        {\n            start = CORE_PROFILE_LEVEL1;  /* range for CORE PROFILE  */\n            end = CORE_PROFILE_LEVEL2;\n        }\n        else\n        {\n            start = SIMPLE_PROFILE_LEVEL0;  /* range for SIMPLE PROFILE */\n            end = SIMPLE_PROFILE_LEVEL5;\n        }\n\n        for (j = start; !bFound && j <= end; j++)\n        {\n            if (base_bitrate        <= profile_level_max_bitrate[j]      &&\n                    base_packet_size    <= profile_level_max_packet_size[j]  &&\n                    base_MBsPerSec      <= profile_level_max_mbsPerSec[j]    &&\n                    base_VBV_size       <= 
profile_level_max_VBV_size[j])\n\n            {\n                bFound = 1;\n                break;\n            }\n        }\n\n        if (!bFound) // && start == 4)\n            return PV_FALSE; /* mis-match in the profiles between base layer and enhancement layer */\n\n        /* j for base layer, i for enhancement layer */\n        video->encParams->ProfileLevel[0] = profile_level_code[j];\n        video->encParams->ProfileLevel[1] = scalable_profile_level_code[i];\n        video->encParams->BufferSize[0]   = base_VBV_size;\n        video->encParams->BufferSize[1]   = enhance_VBV_size;\n\n        if (video->encParams->LayerMaxBitRate[0] == 0)\n            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[j];\n\n        if (video->encParams->LayerMaxBitRate[1] == 0)\n            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[i];\n\n        if (video->encParams->LayerMaxFrameRate[0] == 0)\n            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[j] / nTotalMB);\n\n        if (video->encParams->LayerMaxFrameRate[1] == 0)\n            video->encParams->LayerMaxFrameRate[1] = PV_MIN(30, (float)scalable_profile_level_max_mbsPerSec[i] / nTotalMB);\n\n\n    } /* end of: if(nLayers == 1) */\n\n\n    if (!video->encParams->H263_Enabled && (video->encParams->ProfileLevel[0] == 0x08)) /* SPL0 restriction*/\n    {\n        /* PV only allow frame-based rate control, no QP change from one MB to another\n        if(video->encParams->ACDCPrediction == TRUE && MB-based rate control)\n         return PV_FALSE */\n    }\n\n    return PV_TRUE;\n}\n\n#endif /* #ifndef ORIGINAL_VERSION */\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/mp4enc_api.cpp.original",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#include \"mp4enc_lib.h\"\n#include \"bitstream_io.h\"\n#include \"rate_control.h\"\n#include \"m4venc_oscl.h\"\n\n\n/* Inverse normal zigzag */\nconst static Int zigzag_i[NCOEFF_BLOCK] =\n{\n    0, 1, 8, 16, 9, 2, 3, 10,\n    17, 24, 32, 25, 18, 11, 4, 5,\n    12, 19, 26, 33, 40, 48, 41, 34,\n    27, 20, 13, 6, 7, 14, 21, 28,\n    35, 42, 49, 56, 57, 50, 43, 36,\n    29, 22, 15, 23, 30, 37, 44, 51,\n    58, 59, 52, 45, 38, 31, 39, 46,\n    53, 60, 61, 54, 47, 55, 62, 63\n};\n\n/* INTRA */\nconst static Int mpeg_iqmat_def[NCOEFF_BLOCK] =\n    {  8, 17, 18, 19, 21, 23, 25, 27,\n       17, 18, 19, 21, 23, 25, 27, 28,\n       20, 21, 22, 23, 24, 26, 28, 30,\n       21, 22, 23, 24, 26, 28, 30, 32,\n       22, 23, 24, 26, 28, 30, 32, 35,\n       23, 24, 26, 28, 30, 32, 35, 38,\n       25, 26, 28, 30, 32, 35, 38, 41,\n       27, 28, 30, 32, 35, 38, 41, 45\n    };\n\n/* INTER */\nconst static Int mpeg_nqmat_def[64]  =\n    { 16, 17, 18, 19, 20, 21, 22, 23,\n      17, 18, 19, 20, 21, 22, 23, 24,\n      18, 19, 20, 21, 22, 23, 24, 25,\n      19, 20, 21, 22, 23, 24, 26, 27,\n      20, 21, 22, 23, 25, 26, 27, 28,\n      21, 22, 23, 24, 26, 27, 28, 30,\n      22, 23, 24, 26, 27, 28, 30, 31,\n      23, 24, 
25, 27, 28, 30, 31, 33\n    };\n\n/* Profiles and levels */\n/* Simple profile(level 0-3) and Core profile (level 1-2) */\n/* {SPL0, SPL1, SPL2, SPL3, SPL4a, SPL5, CPL1, CPL2} , SPL0: Simple Profile@Level0, CPL1: Core Profile@Level1 */\nconst static Int profile_level_code[MAX_BASE_PROFILE+1] =\n{\n    0x08, 0x01, 0x02, 0x03, 0x04, 0x05, 0x21, 0x22\n};\n\nconst static Int profile_level_max_bitrate[MAX_BASE_PROFILE+1] =\n{\n    64000, 64000, 128000, 384000, 4000000, 8000000, 384000, 2000000\n};\n\nconst static Int profile_level_max_packet_size[MAX_BASE_PROFILE+1] =\n{\n    2048, 2048, 4096, 8192, 16384, 16384, 4096, 8192\n};\n\nconst static Int profile_level_max_mbsPerSec[MAX_BASE_PROFILE+1] =\n{\n    1485, 1485, 5940, 11880, 36000, 40500, 5940, 23760\n};\n\nconst static Int profile_level_max_VBV_size[MAX_BASE_PROFILE+1] =\n{\n    163840, 163840, 655360, 655360, 1310720, 1835008, 262144, 1310720\n};\n\n\n/* Scalable profiles for nLayers = 2 */\n/* Simple scalable profile (level 0-2) and Core scalable profile (level 1-3) */\n/* {SSPL0, SSPL1, SSPL2, CSPL1, CSPL2, CSPL3} , SSPL0: Simple Scalable Profile@Level0, CSPL1: Core Scalable Profile@Level1, the fourth is redundant for easy table manipulation */\n\nconst static Int scalable_profile_level_code[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    0x10, 0x11, 0x12, 0xA1, 0xA2, 0xA3\n};\n\nconst static Int scalable_profile_level_max_bitrate[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    128000, 128000, 256000, 768000, 1500000, 4000000\n};\n\n/* in bits */\nconst static Int scalable_profile_level_max_packet_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    2048, 2048, 4096, 4096, 4096, 16384\n};\n\nconst static Int scalable_profile_level_max_mbsPerSec[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    1485, 7425, 23760, 14850, 29700, 120960\n};\n\nconst static Int scalable_profile_level_max_VBV_size[MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE] =\n{\n    163840, 655360, 655360, 1048576, 1310720, 1310720\n};\n\n\n/* 
H263 profile 0 @ level 10-70 */\nconst static Int   h263Level[8] = {0, 10, 20, 30, 40, 50, 60, 70};\nconst static float rBR_bound[8] = {0, 1, 2, 6, 32, 64, 128, 256};\nconst static float max_h263_framerate[2] = {(float)30000 / (float)2002,\n        (float)30000 / (float)1001\n                                           };\nconst static Int   max_h263_width[2]  = {176, 352};\nconst static Int   max_h263_height[2] = {144, 288};\n\n/* 6/2/2001, newly added functions to make PVEncodeVop more readable. */\nInt DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime);\nvoid DetermineVopType(VideoEncData *video, Int currLayer);\nInt UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status);\nBool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized);\n\n#ifdef PRINT_RC_INFO\nextern FILE *facct;\nextern int tiTotalNumBitsGenerated;\nextern int iStuffBits;\n#endif\n\n#ifdef PRINT_EC\nextern FILE *fec;\n#endif\n\n\n/* ======================================================================== */\n/*  Function : PVGetDefaultEncOption()                                      */\n/*  Date     : 12/12/2005                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetDefaultEncOption(VideoEncOptions *encOption, Int encUseCase)\n{\n    VideoEncOptions defaultUseCase = {H263_MODE, profile_level_max_packet_size[SIMPLE_PROFILE_LEVEL0] >> 3,\n                                      SIMPLE_PROFILE_LEVEL0, PV_OFF, 0, 1, 1000, 33, {144, 144}, {176, 176}, {15, 30}, {64000, 128000},\n                                      {10, 10}, {12, 12}, {0, 0}, CBR_1, 0.0, PV_OFF, -1, 0, PV_OFF, 16, PV_OFF, 0, PV_ON\n                                     };\n\n    OSCL_UNUSED_ARG(encUseCase); // unused for now. Later we can add more defaults setting and use this\n    // argument to select the right one.\n    /* in the future we can create more meaningful use-cases */\n    if (encOption == NULL)\n    {\n        return PV_FALSE;\n    }\n\n    M4VENC_MEMCPY(encOption, &defaultUseCase, sizeof(VideoEncOptions));\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVInitVideoEncoder()                                         */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Initialization of MP4 Encoder and VO bitstream               */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :  5/21/01, allocate only yChan and assign uChan & vChan   */\n/*              12/12/05, add encoding option as input argument         */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool    PVInitVideoEncoder(VideoEncControls *encoderControl, VideoEncOptions *encOption)\n{\n\n    Bool        status = PV_TRUE;\n    Int         nLayers, idx, i, j;\n    Int         max = 0, max_width = 0, max_height = 0, pitch, offset;\n    Int         size = 0, nTotalMB = 0;\n    VideoEncData *video;\n    Vol         *pVol;\n    VideoEncParams  *pEncParams;\n    Int         temp_w, temp_h, mbsPerSec;\n\n    /******************************************/\n    /*      this part use to be PVSetEncode() */\n    Int profile_table_index, *profile_level_table;\n    Int profile_level = encOption->profile_level;\n    Int PacketSize = encOption->packetSize << 3;\n    Int timeInc, timeIncRes;\n    float profile_max_framerate;\n    VideoEncParams *encParams;\n\n    if (encoderControl->videoEncoderData) /* this has been called */\n    {\n        if (encoderControl->videoEncoderInit) /* check if PVInitVideoEncoder() has been called  */\n        {\n            PVCleanUpVideoEncoder(encoderControl);\n            encoderControl->videoEncoderInit = 0;\n        }\n\n        M4VENC_FREE(encoderControl->videoEncoderData);\n        encoderControl->videoEncoderData = NULL;\n    }\n    encoderControl->videoEncoderInit = 0;   /* reset this value */\n\n    video = (VideoEncData *)M4VENC_MALLOC(sizeof(VideoEncData)); /* allocate memory for encData */\n\n    if (video == NULL)\n        return PV_FALSE;\n\n    M4VENC_MEMSET(video, 0, sizeof(VideoEncData));\n\n    encoderControl->videoEncoderData = (void *) video;         /* set up pointer in VideoEncData structure */\n\n    video->encParams = (VideoEncParams *)M4VENC_MALLOC(sizeof(VideoEncParams));\n    if (video->encParams == NULL)\n        goto CLEAN_UP;\n\n    M4VENC_MEMSET(video->encParams, 
0, sizeof(VideoEncParams));\n\n    encParams = video->encParams;\n    encParams->nLayers = encOption->numLayers;\n\n    /* Check whether the input packetsize is valid (Note: put code here (before any memory allocation) in order to avoid memory leak */\n    if ((Int)profile_level <= (Int)(MAX_BASE_PROFILE))  /* non-scalable profile */\n    {\n        profile_level_table = (Int *)profile_level_max_packet_size;\n        profile_table_index = (Int)profile_level;\n        if (encParams->nLayers != 1)\n        {\n            goto CLEAN_UP;\n        }\n\n        encParams->LayerMaxMbsPerSec[0] = profile_level_max_mbsPerSec[profile_table_index];\n\n    }\n    else   /* scalable profile */\n    {\n        profile_level_table = (Int *)scalable_profile_level_max_packet_size;\n        profile_table_index = (Int)profile_level - (Int)(MAX_BASE_PROFILE) - 1;\n        if (encParams->nLayers < 2)\n        {\n            goto CLEAN_UP;\n        }\n        for (i = 0; i < encParams->nLayers; i++)\n        {\n            encParams->LayerMaxMbsPerSec[i] = scalable_profile_level_max_mbsPerSec[profile_table_index];\n        }\n\n    }\n\n    /* cannot have zero size packet with these modes */\n    if (PacketSize == 0)\n    {\n        if (encOption->encMode == DATA_PARTITIONING_MODE)\n        {\n            goto CLEAN_UP;\n        }\n        if (encOption->encMode == COMBINE_MODE_WITH_ERR_RES)\n        {\n            encOption->encMode = COMBINE_MODE_NO_ERR_RES;\n        }\n    }\n\n    if (encOption->gobHeaderInterval == 0)\n    {\n        if (encOption->encMode == H263_MODE_WITH_ERR_RES)\n        {\n            encOption->encMode = H263_MODE;\n        }\n\n        if (encOption->encMode == SHORT_HEADER_WITH_ERR_RES)\n        {\n            encOption->encMode = SHORT_HEADER;\n        }\n    }\n\n    if (PacketSize > profile_level_table[profile_table_index])\n        goto CLEAN_UP;\n\n    /* Initial Defaults for all Modes */\n\n    encParams->SequenceStartCode = 1;\n    
encParams->GOV_Enabled = 0;\n    encParams->RoundingType = 0;\n    encParams->IntraDCVlcThr = PV_MAX(PV_MIN(encOption->intraDCVlcTh, 7), 0);\n    encParams->ACDCPrediction = ((encOption->useACPred == PV_ON) ? TRUE : FALSE);\n    encParams->RC_Type = encOption->rcType;\n    encParams->Refresh = encOption->numIntraMB;\n    encParams->ResyncMarkerDisable = 0; /* Enable Resync Marker */\n\n    for (i = 0; i < encOption->numLayers; i++)\n    {\n#ifdef NO_MPEG_QUANT\n        encParams->QuantType[i] = 0;\n#else\n        encParams->QuantType[i] = encOption->quantType[i];      /* H263 */\n#endif\n        if (encOption->pQuant[i] >= 1 && encOption->pQuant[i] <= 31)\n        {\n            encParams->InitQuantPvop[i] = encOption->pQuant[i];\n        }\n        else\n        {\n            goto CLEAN_UP;\n        }\n        if (encOption->iQuant[i] >= 1 && encOption->iQuant[i] <= 31)\n        {\n            encParams->InitQuantIvop[i] = encOption->iQuant[i];\n        }\n        else\n        {\n            goto CLEAN_UP;\n        }\n    }\n\n    encParams->HalfPel_Enabled = 1;\n    encParams->SearchRange = encOption->searchRange; /* 4/16/2001 */\n    encParams->FullSearch_Enabled = 0;\n#ifdef NO_INTER4V\n    encParams->MV8x8_Enabled = 0;\n#else\n    encParams->MV8x8_Enabled = 0;// comment out for now!! 
encOption->mv8x8Enable;\n#endif\n    encParams->H263_Enabled = 0;\n    encParams->GOB_Header_Interval = 0; // need to be reset to 0\n    encParams->IntraPeriod = encOption->intraPeriod;    /* Intra update period update default*/\n    encParams->SceneChange_Det = encOption->sceneDetect;\n    encParams->FineFrameSkip_Enabled = 0;\n    encParams->NoFrameSkip_Enabled = encOption->noFrameSkipped;\n    encParams->NoPreSkip_Enabled = encOption->noFrameSkipped;\n    encParams->GetVolHeader[0] = 0;\n    encParams->GetVolHeader[1] = 0;\n    encParams->ResyncPacketsize = encOption->packetSize << 3;\n    encParams->LayerMaxBitRate[0] = 0;\n    encParams->LayerMaxBitRate[1] = 0;\n    encParams->LayerMaxFrameRate[0] = (float)0.0;\n    encParams->LayerMaxFrameRate[1] = (float)0.0;\n    encParams->VBV_delay = encOption->vbvDelay;  /* 2sec VBV buffer size */\n\n    switch (encOption->encMode)\n    {\n\n        case SHORT_HEADER:\n        case SHORT_HEADER_WITH_ERR_RES:\n\n            /* From Table 6-26 */\n            encParams->nLayers = 1;\n            encParams->QuantType[0] = 0;    /*H263 */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */\n            encParams->DataPartitioning = 0; /* Combined Mode */\n            encParams->ReversibleVLC = 0;   /* Disable RVLC */\n            encParams->RoundingType = 0;\n            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */\n            encParams->MV8x8_Enabled = 0;\n\n            encParams->GOB_Header_Interval = encOption->gobHeaderInterval;\n            encParams->H263_Enabled = 2;\n            encParams->GOV_Enabled = 0;\n            encParams->TimeIncrementRes = 30000;        /* timeIncrementRes for H263 */\n            break;\n\n        case H263_MODE:\n        case H263_MODE_WITH_ERR_RES:\n\n            /* From Table 6-26 */\n            encParams->nLayers = 1;\n            encParams->QuantType[0] = 0;    /*H263 */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker 
*/\n            encParams->DataPartitioning = 0; /* Combined Mode */\n            encParams->ReversibleVLC = 0;   /* Disable RVLC */\n            encParams->RoundingType = 0;\n            encParams->IntraDCVlcThr = 7;   /* use_intra_dc_vlc = 0 */\n            encParams->MV8x8_Enabled = 0;\n\n            encParams->H263_Enabled = 1;\n            encParams->GOV_Enabled = 0;\n            encParams->TimeIncrementRes = 30000;        /* timeIncrementRes for H263 */\n\n            break;\n#ifndef H263_ONLY\n        case DATA_PARTITIONING_MODE:\n\n            encParams->DataPartitioning = 1;        /* Base Layer Data Partitioning */\n            encParams->ResyncMarkerDisable = 0; /* Resync Marker */\n#ifdef NO_RVLC\n            encParams->ReversibleVLC = 0;\n#else\n            encParams->ReversibleVLC = (encOption->rvlcEnable == PV_ON); /* RVLC when Data Partitioning */\n#endif\n            encParams->ResyncPacketsize = PacketSize;\n            break;\n\n        case COMBINE_MODE_WITH_ERR_RES:\n\n            encParams->DataPartitioning = 0;        /* Combined Mode */\n            encParams->ResyncMarkerDisable = 0; /* Resync Marker */\n            encParams->ReversibleVLC = 0;           /* No RVLC */\n            encParams->ResyncPacketsize = PacketSize;\n            break;\n\n        case COMBINE_MODE_NO_ERR_RES:\n\n            encParams->DataPartitioning = 0;        /* Combined Mode */\n            encParams->ResyncMarkerDisable = 1; /* Disable Resync Marker */\n            encParams->ReversibleVLC = 0;           /* No RVLC */\n            break;\n#endif\n        default:\n            goto CLEAN_UP;\n    }\n    /* Set the constraints (maximum values) according to the input profile and level */\n    /* Note that profile_table_index is already figured out above */\n\n    /* base layer */\n    encParams->profile_table_index    = profile_table_index; /* Used to limit the profile and level in SetProfile_BufferSize() */\n\n    /* check timeIncRes */\n    timeIncRes = 
encOption->timeIncRes;\n    timeInc = encOption->tickPerSrc;\n\n    if ((timeIncRes >= 1) && (timeIncRes <= 65536) && (timeInc < timeIncRes) && (timeInc != 0))\n    {\n        if (!encParams->H263_Enabled)\n        {\n            encParams->TimeIncrementRes = timeIncRes;\n        }\n        else\n        {\n            encParams->TimeIncrementRes = 30000;\n//          video->FrameRate = 30000/(float)1001; /* fix it to 29.97 fps */\n        }\n        video->FrameRate = timeIncRes / ((float)timeInc);\n    }\n    else\n    {\n        goto CLEAN_UP;\n    }\n\n    /* check frame dimension */\n    if (encParams->H263_Enabled)\n    {\n        switch (encOption->encWidth[0])\n        {\n            case 128:\n                if (encOption->encHeight[0] != 96) /* source_format = 1 */\n                    goto CLEAN_UP;\n                break;\n            case 176:\n                if (encOption->encHeight[0] != 144) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n            case 352:\n                if (encOption->encHeight[0] != 288) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n\n            case 704:\n                if (encOption->encHeight[0] != 576) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n            case 1408:\n                if (encOption->encHeight[0] != 1152) /* source_format = 2 */\n                    goto CLEAN_UP;\n                break;\n\n            default:\n                goto CLEAN_UP;\n        }\n    }\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerHeight[i] = encOption->encHeight[i];\n        encParams->LayerWidth[i] = encOption->encWidth[i];\n    }\n\n    /* check frame rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerFrameRate[i] = encOption->encFrameRate[i];\n    }\n\n    if (encParams->nLayers > 1)\n    {\n        if (encOption->encFrameRate[0] == 
encOption->encFrameRate[1] ||\n                encOption->encFrameRate[0] == 0. || encOption->encFrameRate[1] == 0.) /* 7/31/03 */\n            goto CLEAN_UP;\n    }\n    /* set max frame rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n\n        /* Make sure the maximum framerate is consistent with the given profile and level */\n        nTotalMB = ((encParams->LayerWidth[i] + 15) / 16) * ((encParams->LayerHeight[i] + 15) / 16);\n\n        if (nTotalMB > 0)\n            profile_max_framerate = (float)encParams->LayerMaxMbsPerSec[i] / (float)nTotalMB;\n\n        else\n            profile_max_framerate = (float)30.0;\n\n        encParams->LayerMaxFrameRate[i] = PV_MIN(profile_max_framerate, encParams->LayerFrameRate[i]);\n    }\n\n    /* check bit rate */\n    /* set max bit rate */\n    for (i = 0; i < encParams->nLayers; i++)\n    {\n        encParams->LayerBitRate[i] = encOption->bitRate[i];\n        encParams->LayerMaxBitRate[i] = encOption->bitRate[i];\n    }\n    if (encParams->nLayers > 1)\n    {\n        if (encOption->bitRate[0] == encOption->bitRate[1] ||\n                encOption->bitRate[0] == 0 || encOption->bitRate[1] == 0) /* 7/31/03 */\n            goto CLEAN_UP;\n    }\n    /* check rate control and vbv delay*/\n    encParams->RC_Type = encOption->rcType;\n\n    if (encOption->vbvDelay == 0.0) /* set to default */\n    {\n        switch (encOption->rcType)\n        {\n            case CBR_1:\n            case CBR_2:\n                encParams->VBV_delay = (float)2.0; /* default 2sec VBV buffer size */\n                break;\n\n            case CBR_LOWDELAY:\n                encParams->VBV_delay = (float)0.5; /* default 0.5sec VBV buffer size */\n                break;\n\n            case VBR_1:\n            case VBR_2:\n                encParams->VBV_delay = (float)10.0; /* default 10sec VBV buffer size */\n                break;\n            default:\n                break;\n        }\n    }\n    else /* force this value */\n    {\n  
      encParams->VBV_delay = encOption->vbvDelay;\n    }\n\n    /* check search range */\n    if (encParams->H263_Enabled && encOption->searchRange > 16)\n    {\n        encParams->SearchRange = 16; /* 4/16/2001 */\n    }\n\n    /*****************************************/\n    /* checking for conflict between options */\n    /*****************************************/\n\n    if (video->encParams->RC_Type == CBR_1 || video->encParams->RC_Type == CBR_2 || video->encParams->RC_Type == CBR_LOWDELAY)  /* if CBR */\n    {\n#ifdef _PRINT_STAT\n        if (video->encParams->NoFrameSkip_Enabled == PV_ON ||\n                video->encParams->NoPreSkip_Enabled == PV_ON) /* don't allow frame skip*/\n            printf(\"WARNING!!!! CBR with NoFrameSkip\\n\");\n#endif\n    }\n    else if (video->encParams->RC_Type == CONSTANT_Q)   /* constant_Q */\n    {\n        video->encParams->NoFrameSkip_Enabled = PV_ON;  /* no frame skip */\n        video->encParams->NoPreSkip_Enabled = PV_ON;    /* no frame skip */\n#ifdef _PRINT_STAT\n        printf(\"Turn on NoFrameSkip\\n\");\n#endif\n    }\n\n    if (video->encParams->NoFrameSkip_Enabled == PV_ON) /* if no frame skip */\n    {\n        video->encParams->FineFrameSkip_Enabled = PV_OFF;\n#ifdef _PRINT_STAT\n        printf(\"NoFrameSkip !!! 
may violate VBV_BUFFER constraint.\\n\");\n        printf(\"Turn off FineFrameSkip\\n\");\n#endif\n    }\n\n    /******************************************/\n    /******************************************/\n\n    nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */\n\n    /* Find the maximum width*height for memory allocation of the VOPs */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        temp_w = video->encParams->LayerWidth[idx];\n        temp_h = video->encParams->LayerHeight[idx];\n\n        if ((temp_w*temp_h) > max)\n        {\n            max = temp_w * temp_h;\n            max_width = ((temp_w + 15) >> 4) << 4;\n            max_height = ((temp_h + 15) >> 4) << 4;\n            nTotalMB = ((max_width * max_height) >> 8);\n        }\n\n        /* Check if the video size and framerate(MBsPerSec) are vald */\n        mbsPerSec = (Int)(nTotalMB * video->encParams->LayerFrameRate[idx]);\n        if (mbsPerSec > video->encParams->LayerMaxMbsPerSec[idx]) status = PV_FALSE;\n    }\n\n    /****************************************************/\n    /* Set Profile and Video Buffer Size for each layer */\n    /****************************************************/\n    if (video->encParams->RC_Type == CBR_LOWDELAY) video->encParams->VBV_delay = 0.5; /* For CBR_LOWDELAY, we set 0.5sec buffer */\n    status = SetProfile_BufferSize(video, video->encParams->VBV_delay, 1);\n    if (status != PV_TRUE)\n        goto CLEAN_UP;\n\n    /****************************************/\n    /* memory allocation and initialization */\n    /****************************************/\n\n    if (video == NULL) goto CLEAN_UP;\n\n    /* cyclic reference for passing through both structures */\n    video->videoEncControls = encoderControl;\n\n    //video->currLayer = 0; /* Set current Layer to 0 */\n    //video->currFrameNo = 0; /* Set current frame Number to 0 */\n    video->nextModTime = 0;\n    video->nextEncIVop = 0; /* Sets up very first frame to be I-VOP! 
*/\n    video->numVopsInGOP = 0; /* counter for Vops in Gop, 2/8/01 */\n\n    //video->frameRate = video->encParams->LayerFrameRate[0]; /* Set current layer frame rate */\n\n    video->QPMB = (UChar *) M4VENC_MALLOC(nTotalMB * sizeof(UChar)); /* Memory for MB quantizers */\n    if (video->QPMB == NULL) goto CLEAN_UP;\n\n\n    video->headerInfo.Mode = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB); /* Memory for MB Modes */\n    if (video->headerInfo.Mode == NULL) goto CLEAN_UP;\n    video->headerInfo.CBP = (UChar *) M4VENC_MALLOC(sizeof(UChar) * nTotalMB);   /* Memory for CBP (Y and C) of each MB */\n    if (video->headerInfo.CBP == NULL) goto CLEAN_UP;\n\n    /* Allocating motion vector space and interpolation memory*/\n\n    video->mot = (MOT **)M4VENC_MALLOC(sizeof(MOT *) * nTotalMB);\n    if (video->mot == NULL) goto CLEAN_UP;\n\n    for (idx = 0; idx < nTotalMB; idx++)\n    {\n        video->mot[idx] = (MOT *)M4VENC_MALLOC(sizeof(MOT) * 8);\n        if (video->mot[idx] == NULL)\n        {\n            goto CLEAN_UP;\n        }\n    }\n\n    video->intraArray = (UChar *)M4VENC_MALLOC(sizeof(UChar) * nTotalMB);\n    if (video->intraArray == NULL) goto CLEAN_UP;\n\n    video->sliceNo = (UChar *) M4VENC_MALLOC(nTotalMB); /* Memory for Slice Numbers */\n    if (video->sliceNo == NULL) goto CLEAN_UP;\n    /* Allocating space for predDCAC[][8][16], Not that I intentionally  */\n    /*    increase the dimension of predDCAC from [][6][15] to [][8][16] */\n    /*    so that compilers can generate faster code to indexing the     */\n    /*    data inside (by using << instead of *).         04/14/2000. */\n    /* 5/29/01, use  decoder lib ACDC prediction memory scheme.  
*/\n    video->predDC = (typeDCStore *) M4VENC_MALLOC(nTotalMB * sizeof(typeDCStore));\n    if (video->predDC == NULL) goto CLEAN_UP;\n\n    if (!video->encParams->H263_Enabled)\n    {\n        video->predDCAC_col = (typeDCACStore *) M4VENC_MALLOC(((max_width >> 4) + 1) * sizeof(typeDCACStore));\n        if (video->predDCAC_col == NULL) goto CLEAN_UP;\n\n        /* element zero will be used for storing vertical (col) AC coefficients */\n        /*  the rest will be used for storing horizontal (row) AC coefficients  */\n        video->predDCAC_row = video->predDCAC_col + 1;        /*  ACDC */\n\n        video->acPredFlag = (Int *) M4VENC_MALLOC(nTotalMB * sizeof(Int)); /* Memory for acPredFlag */\n        if (video->acPredFlag == NULL) goto CLEAN_UP;\n    }\n\n    video->outputMB = (MacroBlock *) M4VENC_MALLOC(sizeof(MacroBlock)); /* Allocating macroblock space */\n    if (video->outputMB == NULL) goto CLEAN_UP;\n    M4VENC_MEMSET(video->outputMB->block[0], 0, (sizeof(Short) << 6)*6);\n\n    M4VENC_MEMSET(video->dataBlock, 0, sizeof(Short) << 7);\n    /* Allocate (2*packetsize) working bitstreams */\n\n    video->bitstream1 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 1*/\n    if (video->bitstream1 == NULL) goto CLEAN_UP;\n    video->bitstream2 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 2*/\n    if (video->bitstream2 == NULL) goto CLEAN_UP;\n    video->bitstream3 = BitStreamCreateEnc(2 * 4096); /*allocate working stream 3*/\n    if (video->bitstream3 == NULL) goto CLEAN_UP;\n\n    /* allocate overrun buffer */\n    // this buffer is used when user's buffer is too small to hold one frame.\n    // It is not needed for slice-based encoding.\n    if (nLayers == 1)\n    {\n        video->oBSize = encParams->BufferSize[0] >> 3;\n    }\n    else\n    {\n        video->oBSize = PV_MAX((encParams->BufferSize[0] >> 3), (encParams->BufferSize[1] >> 3));\n    }\n\n    if (video->oBSize > DEFAULT_OVERRUN_BUFFER_SIZE || encParams->RC_Type == 
CONSTANT_Q) // set limit\n    {\n        video->oBSize = DEFAULT_OVERRUN_BUFFER_SIZE;\n    }\n    video->overrunBuffer = (UChar*) M4VENC_MALLOC(sizeof(UChar) * video->oBSize);\n    if (video->overrunBuffer == NULL) goto CLEAN_UP;\n\n\n    video->currVop = (Vop *) M4VENC_MALLOC(sizeof(Vop)); /* Memory for Current VOP */\n    if (video->currVop == NULL) goto CLEAN_UP;\n\n    /* add padding, 09/19/05 */\n    if (video->encParams->H263_Enabled) /* make it conditional  11/28/05 */\n    {\n        pitch = max_width;\n        offset = 0;\n    }\n    else\n    {\n        pitch = max_width + 32;\n        offset = (pitch << 4) + 16;\n        max_height += 32;\n    }\n    size = pitch * max_height;\n\n    video->currVop->yChan = (PIXEL *)M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for currVop Y */\n    if (video->currVop->yChan == NULL) goto CLEAN_UP;\n    video->currVop->uChan = video->currVop->yChan + size;/* Memory for currVop U */\n    video->currVop->vChan = video->currVop->uChan + (size >> 2);/* Memory for currVop V */\n\n    /* shift for the offset */\n    if (offset)\n    {\n        video->currVop->yChan += offset; /* offset to the origin.*/\n        video->currVop->uChan += (offset >> 2) + 4;\n        video->currVop->vChan += (offset >> 2) + 4;\n    }\n\n    video->forwardRefVop = video->currVop;      /*  Initialize forwardRefVop */\n    video->backwardRefVop = video->currVop;     /*  Initialize backwardRefVop */\n\n    video->prevBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Previous Base Vop */\n    if (video->prevBaseVop == NULL) goto CLEAN_UP;\n    video->prevBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for prevBaseVop Y */\n    if (video->prevBaseVop->yChan == NULL) goto CLEAN_UP;\n    video->prevBaseVop->uChan = video->prevBaseVop->yChan + size; /* Memory for prevBaseVop U */\n    video->prevBaseVop->vChan = video->prevBaseVop->uChan + (size >> 2); /* Memory for prevBaseVop V 
*/\n\n    if (offset)\n    {\n        video->prevBaseVop->yChan += offset; /* offset to the origin.*/\n        video->prevBaseVop->uChan += (offset >> 2) + 4;\n        video->prevBaseVop->vChan += (offset >> 2) + 4;\n    }\n\n\n    if (0) /* If B Frames */\n    {\n        video->nextBaseVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));         /* Memory for Next Base Vop */\n        if (video->nextBaseVop == NULL) goto CLEAN_UP;\n        video->nextBaseVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for nextBaseVop Y */\n        if (video->nextBaseVop->yChan == NULL) goto CLEAN_UP;\n        video->nextBaseVop->uChan = video->nextBaseVop->yChan + size; /* Memory for nextBaseVop U */\n        video->nextBaseVop->vChan = video->nextBaseVop->uChan + (size >> 2); /* Memory for nextBaseVop V */\n\n        if (offset)\n        {\n            video->nextBaseVop->yChan += offset; /* offset to the origin.*/\n            video->nextBaseVop->uChan += (offset >> 2) + 4;\n            video->nextBaseVop->vChan += (offset >> 2) + 4;\n        }\n    }\n\n    if (nLayers > 1)   /* If enhancement layers */\n    {\n        video->prevEnhanceVop = (Vop *) M4VENC_MALLOC(sizeof(Vop));      /* Memory for Previous Enhancement Vop */\n        if (video->prevEnhanceVop == NULL) goto CLEAN_UP;\n        video->prevEnhanceVop->yChan = (PIXEL *) M4VENC_MALLOC(sizeof(PIXEL) * (size + (size >> 1))); /* Memory for Previous Ehancement Y */\n        if (video->prevEnhanceVop->yChan == NULL) goto CLEAN_UP;\n        video->prevEnhanceVop->uChan = video->prevEnhanceVop->yChan + size; /* Memory for Previous Enhancement U */\n        video->prevEnhanceVop->vChan = video->prevEnhanceVop->uChan + (size >> 2); /* Memory for Previous Enhancement V */\n\n        if (offset)\n        {\n            video->prevEnhanceVop->yChan += offset; /* offset to the origin.*/\n            video->prevEnhanceVop->uChan += (offset >> 2) + 4;\n            video->prevEnhanceVop->vChan += (offset >> 
2) + 4;\n        }\n    }\n\n    video->numberOfLayers = nLayers; /* Number of Layers */\n    video->sumMAD = 0;\n\n\n    /* 04/09/01, for Vops in the use multipass processing */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        video->pMP[idx] = (MultiPass *)M4VENC_MALLOC(sizeof(MultiPass));\n        if (video->pMP[idx] == NULL)    goto CLEAN_UP;\n        M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));\n\n        video->pMP[idx]->encoded_frames = -1; /* forget about the very first I frame */\n\n\n        /* RDInfo **pRDSamples */\n        video->pMP[idx]->pRDSamples = (RDInfo **)M4VENC_MALLOC(30 * sizeof(RDInfo *));\n        if (video->pMP[idx]->pRDSamples == NULL)    goto CLEAN_UP;\n        for (i = 0; i < 30; i++)\n        {\n            video->pMP[idx]->pRDSamples[i] = (RDInfo *)M4VENC_MALLOC(32 * sizeof(RDInfo));\n            if (video->pMP[idx]->pRDSamples[i] == NULL) goto CLEAN_UP;\n            for (j = 0; j < 32; j++)    M4VENC_MEMSET(&(video->pMP[idx]->pRDSamples[i][j]), 0, sizeof(RDInfo));\n        }\n        video->pMP[idx]->frameRange = (Int)(video->encParams->LayerFrameRate[idx] * 1.0); /* 1.0s time frame*/\n        video->pMP[idx]->frameRange = PV_MAX(video->pMP[idx]->frameRange, 5);\n        video->pMP[idx]->frameRange = PV_MIN(video->pMP[idx]->frameRange, 30);\n\n        video->pMP[idx]->framePos = -1;\n\n    }\n    /* /// End /////////////////////////////////////// */\n\n\n    video->vol = (Vol **)M4VENC_MALLOC(nLayers * sizeof(Vol *)); /* Memory for VOL pointers */\n\n    /* Memory allocation and Initialization of Vols and writing of headers */\n    if (video->vol == NULL) goto CLEAN_UP;\n\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        video->volInitialize[idx] = 1;\n        video->refTick[idx] = 0;\n        video->relLayerCodeTime[idx] = 1000;\n        video->vol[idx] = (Vol *)M4VENC_MALLOC(sizeof(Vol));\n        if (video->vol[idx] == NULL)  goto CLEAN_UP;\n\n        pVol = video->vol[idx];\n        pEncParams = 
video->encParams;\n\n        M4VENC_MEMSET(video->vol[idx], 0, sizeof(Vol));\n        /* Initialize some VOL parameters */\n        pVol->volID = idx;  /* Set VOL ID */\n        pVol->shortVideoHeader = pEncParams->H263_Enabled; /*Short Header */\n        pVol->GOVStart = pEncParams->GOV_Enabled; /* GOV Header */\n        pVol->timeIncrementResolution = video->encParams->TimeIncrementRes;\n        pVol->nbitsTimeIncRes = 1;\n        while (pVol->timeIncrementResolution > (1 << pVol->nbitsTimeIncRes))\n        {\n            pVol->nbitsTimeIncRes++;\n        }\n\n        /* timing stuff */\n        pVol->timeIncrement = 0;\n        pVol->moduloTimeBase = 0;\n        pVol->fixedVopRate = 0; /* No fixed VOP rate */\n        pVol->stream = (BitstreamEncVideo *)M4VENC_MALLOC(sizeof(BitstreamEncVideo)); /* allocate BitstreamEncVideo Instance */\n        if (pVol->stream == NULL)  goto CLEAN_UP;\n\n        pVol->width = pEncParams->LayerWidth[idx];      /* Layer Width */\n        pVol->height = pEncParams->LayerHeight[idx];    /* Layer Height */\n        //  pVol->intra_acdcPredDisable = pEncParams->ACDCPrediction; /* ACDC Prediction */\n        pVol->ResyncMarkerDisable = pEncParams->ResyncMarkerDisable; /* Resync Marker Mode */\n        pVol->dataPartitioning = pEncParams->DataPartitioning; /* Data Partitioning */\n        pVol->useReverseVLC = pEncParams->ReversibleVLC; /* RVLC */\n        if (idx > 0) /* Scalability layers */\n        {\n            pVol->ResyncMarkerDisable = 1;\n            pVol->dataPartitioning = 0;\n            pVol->useReverseVLC = 0; /*  No RVLC */\n        }\n        pVol->quantType = pEncParams->QuantType[idx];           /* Quantizer Type */\n\n        /* no need to init Quant Matrices */\n\n        pVol->scalability = 0;  /* Vol Scalability */\n        if (idx > 0)\n            pVol->scalability = 1; /* Multiple layers => Scalability */\n\n        /* Initialize Vol to Temporal scalability.  
It can change during encoding */\n        pVol->scalType = 1;\n        /* Initialize reference Vol ID to the base layer = 0 */\n        pVol->refVolID = 0;\n        /* Initialize layer resolution to same as the reference */\n        pVol->refSampDir = 0;\n        pVol->horSamp_m = 1;\n        pVol->horSamp_n = 1;\n        pVol->verSamp_m = 1;\n        pVol->verSamp_n = 1;\n        pVol->enhancementType = 0; /* We always enhance the entire region */\n\n        pVol->nMBPerRow = (pVol->width + 15) / 16;\n        pVol->nMBPerCol = (pVol->height + 15) / 16;\n        pVol->nTotalMB = pVol->nMBPerRow * pVol->nMBPerCol;\n\n        if (pVol->nTotalMB >= 1)\n            pVol->nBitsForMBID = 1;\n        if (pVol->nTotalMB >= 3)\n            pVol->nBitsForMBID = 2;\n        if (pVol->nTotalMB >= 5)\n            pVol->nBitsForMBID = 3;\n        if (pVol->nTotalMB >= 9)\n            pVol->nBitsForMBID = 4;\n        if (pVol->nTotalMB >= 17)\n            pVol->nBitsForMBID = 5;\n        if (pVol->nTotalMB >= 33)\n            pVol->nBitsForMBID = 6;\n        if (pVol->nTotalMB >= 65)\n            pVol->nBitsForMBID = 7;\n        if (pVol->nTotalMB >= 129)\n            pVol->nBitsForMBID = 8;\n        if (pVol->nTotalMB >= 257)\n            pVol->nBitsForMBID = 9;\n        if (pVol->nTotalMB >= 513)\n            pVol->nBitsForMBID = 10;\n        if (pVol->nTotalMB >= 1025)\n            pVol->nBitsForMBID = 11;\n        if (pVol->nTotalMB >= 2049)\n            pVol->nBitsForMBID = 12;\n        if (pVol->nTotalMB >= 4097)\n            pVol->nBitsForMBID = 13;\n        if (pVol->nTotalMB >= 8193)\n            pVol->nBitsForMBID = 14;\n        if (pVol->nTotalMB >= 16385)\n            pVol->nBitsForMBID = 15;\n        if (pVol->nTotalMB >= 32769)\n            pVol->nBitsForMBID = 16;\n        if (pVol->nTotalMB >= 65537)\n            pVol->nBitsForMBID = 17;\n        if (pVol->nTotalMB >= 131073)\n            pVol->nBitsForMBID = 18;\n\n        if (pVol->shortVideoHeader)\n        {\n 
           switch (pVol->width)\n            {\n                case 128:\n                    if (pVol->height == 96)  /* source_format = 1 */\n                    {\n                        pVol->nGOBinVop = 6;\n                        pVol->nMBinGOB = 8;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                case 176:\n                    if (pVol->height == 144)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 9;\n                        pVol->nMBinGOB = 11;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n                case 352:\n                    if (pVol->height == 288)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 22;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                case 704:\n                    if (pVol->height == 576)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 88;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n                case 1408:\n                    if (pVol->height == 1152)  /* source_format = 2 */\n                    {\n                        pVol->nGOBinVop = 18;\n                        pVol->nMBinGOB = 352;\n                    }\n                    else\n                        status = PV_FALSE;\n                    break;\n\n                default:\n                    status = PV_FALSE;\n                    break;\n            }\n        }\n    }\n\n    /***************************************************/\n    /* allocate and initialize rate control parameters */\n    
/***************************************************/\n\n    /* BEGIN INITIALIZATION OF ANNEX L RATE CONTROL */\n    if (video->encParams->RC_Type != CONSTANT_Q)\n    {\n        for (idx = 0; idx < nLayers; idx++) /* 12/25/00 */\n        {\n            video->rc[idx] =\n                (rateControl *)M4VENC_MALLOC(sizeof(rateControl));\n\n            if (video->rc[idx] == NULL) goto CLEAN_UP;\n\n            M4VENC_MEMSET(video->rc[idx], 0, sizeof(rateControl));\n        }\n        if (PV_SUCCESS != RC_Initialize(video))\n        {\n            goto CLEAN_UP;\n        }\n        /* initialization for 2-pass rate control */\n    }\n    /* END INITIALIZATION OF ANNEX L RATE CONTROL */\n\n    /********** assign platform dependent functions ***********************/\n    /* 1/23/01 */\n    /* This must be done at run-time not a compile time */\n    video->functionPointer = (FuncPtr*) M4VENC_MALLOC(sizeof(FuncPtr));\n    if (video->functionPointer == NULL) goto CLEAN_UP;\n\n    video->functionPointer->ComputeMBSum = &ComputeMBSum_C;\n    video->functionPointer->SAD_MB_HalfPel[0] = NULL;\n    video->functionPointer->SAD_MB_HalfPel[1] = &SAD_MB_HalfPel_Cxh;\n    video->functionPointer->SAD_MB_HalfPel[2] = &SAD_MB_HalfPel_Cyh;\n    video->functionPointer->SAD_MB_HalfPel[3] = &SAD_MB_HalfPel_Cxhyh;\n\n#ifndef NO_INTER4V\n    video->functionPointer->SAD_Blk_HalfPel = &SAD_Blk_HalfPel_C;\n    video->functionPointer->SAD_Block = &SAD_Block_C;\n#endif\n    video->functionPointer->SAD_Macroblock = &SAD_Macroblock_C;\n    video->functionPointer->ChooseMode = &ChooseMode_C;\n    video->functionPointer->GetHalfPelMBRegion = &GetHalfPelMBRegion_C;\n//  video->functionPointer->SAD_MB_PADDING = &SAD_MB_PADDING; /* 4/21/01 */\n\n\n    encoderControl->videoEncoderInit = 1;  /* init done! 
*/\n\n    return PV_TRUE;\n\nCLEAN_UP:\n    PVCleanUpVideoEncoder(encoderControl);\n\n    return PV_FALSE;\n}\n\n\n/* ======================================================================== */\n/*  Function : PVCleanUpVideoEncoder()                                      */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Deallocates allocated memory from InitVideoEncoder()         */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified : 5/21/01, free only yChan in Vop                          */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool    PVCleanUpVideoEncoder(VideoEncControls *encoderControl)\n{\n    Int idx, i;\n    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;\n    int nTotalMB;\n    int max_width, offset;\n\n#ifdef PRINT_RC_INFO\n    if (facct != NULL)\n    {\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUM BITS GENERATED %d\\n\", tiTotalNumBitsGenerated);\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF FRAMES CODED %d\\n\",\n                video->encParams->rc[0]->totalFrameNumber);\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"Average BitRate %d\\n\",\n                (tiTotalNumBitsGenerated / (90 / 30)));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF STUFF BITS %d\\n\", (iStuffBits + 10740));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"TOTAL NUMBER OF BITS TO 
NETWORK %d\\n\", (35800*90 / 30));;\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"SUM OF STUFF BITS AND GENERATED BITS %d\\n\",\n                (tiTotalNumBitsGenerated + iStuffBits + 10740));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fprintf(facct, \"UNACCOUNTED DIFFERENCE %d\\n\",\n                ((35800*90 / 30) - (tiTotalNumBitsGenerated + iStuffBits + 10740)));\n        fprintf(facct, \"@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\\n\");\n        fclose(facct);\n    }\n#endif\n\n#ifdef PRINT_EC\n    fclose(fec);\n#endif\n\n    if (video != NULL)\n    {\n\n        if (video->QPMB) M4VENC_FREE(video->QPMB);\n        if (video->headerInfo.Mode)M4VENC_FREE(video->headerInfo.Mode);\n        if (video->headerInfo.CBP)M4VENC_FREE(video->headerInfo.CBP);\n\n\n        if (video->mot)\n        {\n            nTotalMB = video->vol[0]->nTotalMB;\n            for (idx = 1; idx < video->currLayer; idx++)\n                if (video->vol[idx]->nTotalMB > nTotalMB)\n                    nTotalMB = video->vol[idx]->nTotalMB;\n            for (idx = 0; idx < nTotalMB; idx++)\n            {\n                if (video->mot[idx])\n                    M4VENC_FREE(video->mot[idx]);\n            }\n            M4VENC_FREE(video->mot);\n        }\n\n        if (video->intraArray) M4VENC_FREE(video->intraArray);\n\n        if (video->sliceNo)M4VENC_FREE(video->sliceNo);\n        if (video->acPredFlag)M4VENC_FREE(video->acPredFlag);\n//      if(video->predDCAC)M4VENC_FREE(video->predDCAC);\n        if (video->predDC) M4VENC_FREE(video->predDC);\n        video->predDCAC_row = NULL;\n        if (video->predDCAC_col) M4VENC_FREE(video->predDCAC_col);\n        if (video->outputMB)M4VENC_FREE(video->outputMB);\n\n        if (video->bitstream1)BitstreamCloseEnc(video->bitstream1);\n        if (video->bitstream2)BitstreamCloseEnc(video->bitstream2);\n        if 
(video->bitstream3)BitstreamCloseEnc(video->bitstream3);\n\n        if (video->overrunBuffer) M4VENC_FREE(video->overrunBuffer);\n\n        max_width = video->encParams->LayerWidth[0];\n        max_width = (((max_width + 15) >> 4) << 4); /* 09/19/05 */\n        if (video->encParams->H263_Enabled)\n        {\n            offset = 0;\n        }\n        else\n        {\n            offset = ((max_width + 32) << 4) + 16;\n        }\n\n        if (video->currVop)\n        {\n            if (video->currVop->yChan)\n            {\n                video->currVop->yChan -= offset;\n                M4VENC_FREE(video->currVop->yChan);\n            }\n            M4VENC_FREE(video->currVop);\n        }\n\n        if (video->nextBaseVop)\n        {\n            if (video->nextBaseVop->yChan)\n            {\n                video->nextBaseVop->yChan -= offset;\n                M4VENC_FREE(video->nextBaseVop->yChan);\n            }\n            M4VENC_FREE(video->nextBaseVop);\n        }\n\n        if (video->prevBaseVop)\n        {\n            if (video->prevBaseVop->yChan)\n            {\n                video->prevBaseVop->yChan -= offset;\n                M4VENC_FREE(video->prevBaseVop->yChan);\n            }\n            M4VENC_FREE(video->prevBaseVop);\n        }\n        if (video->prevEnhanceVop)\n        {\n            if (video->prevEnhanceVop->yChan)\n            {\n                video->prevEnhanceVop->yChan -= offset;\n                M4VENC_FREE(video->prevEnhanceVop->yChan);\n            }\n            M4VENC_FREE(video->prevEnhanceVop);\n        }\n\n        /* 04/09/01, for Vops in the use multipass processing */\n        for (idx = 0; idx < video->encParams->nLayers; idx++)\n        {\n            if (video->pMP[idx])\n            {\n                if (video->pMP[idx]->pRDSamples)\n                {\n                    for (i = 0; i < 30; i++)\n                    {\n                        if (video->pMP[idx]->pRDSamples[i])\n                            
M4VENC_FREE(video->pMP[idx]->pRDSamples[i]);\n                    }\n                    M4VENC_FREE(video->pMP[idx]->pRDSamples);\n                }\n\n                M4VENC_MEMSET(video->pMP[idx], 0, sizeof(MultiPass));\n                M4VENC_FREE(video->pMP[idx]);\n            }\n        }\n        /* //  End /////////////////////////////////////// */\n\n        if (video->vol)\n        {\n            for (idx = 0; idx < video->encParams->nLayers; idx++)\n            {\n                if (video->vol[idx])\n                {\n                    if (video->vol[idx]->stream)\n                        M4VENC_FREE(video->vol[idx]->stream);\n                    M4VENC_FREE(video->vol[idx]);\n                }\n            }\n            M4VENC_FREE(video->vol);\n        }\n\n        /***************************************************/\n        /* stop rate control parameters */\n        /***************************************************/\n\n        /* ANNEX L RATE CONTROL */\n        if (video->encParams->RC_Type != CONSTANT_Q)\n        {\n            RC_Cleanup(video->rc, video->encParams->nLayers);\n\n            for (idx = 0; idx < video->encParams->nLayers; idx++)\n            {\n                if (video->rc[idx])\n                    M4VENC_FREE(video->rc[idx]);\n            }\n        }\n\n        if (video->functionPointer) M4VENC_FREE(video->functionPointer);\n\n        /* If application has called PVCleanUpVideoEncoder then we deallocate */\n        /* If PVInitVideoEncoder class it, then we DO NOT deallocate */\n        if (video->encParams)\n        {\n            M4VENC_FREE(video->encParams);\n        }\n\n        M4VENC_FREE(video);\n        encoderControl->videoEncoderData = NULL; /* video */\n    }\n\n    encoderControl->videoEncoderInit = 0;\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetVolHeader()                                             */\n/*  Date     : 
7/17/2001,                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetVolHeader(VideoEncControls *encCtrl, UChar *volHeader, Int *size, Int layer)\n{\n    VideoEncData    *encData;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n\n    encData->currLayer = layer; /* Set Layer */\n    /*pv_status = */\n    EncodeVOS_Start(encCtrl); /* Encode VOL Header */\n\n    encData->encParams->GetVolHeader[layer] = 1; /* Set usage flag: Needed to support old method*/\n\n    /* Copy bitstream to buffer and set the size */\n\n    if (*size > encData->bitstream1->byteCount)\n    {\n        *size = encData->bitstream1->byteCount;\n        M4VENC_MEMCPY(volHeader, encData->bitstream1->bitstreamBuffer, *size);\n    }\n    else\n        return PV_FALSE;\n\n    /* Reset bitstream1 buffer parameters */\n    BitstreamEncReset(encData->bitstream1);\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetOverrunBuffer()                                         */\n/*  Purpose  : Get the overrun buffer `                                     */\n/*  In/out   :                                                              */\n/*  Return   : Pointer to overrun buffer.                                   
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF UChar* PVGetOverrunBuffer(VideoEncControls *encCtrl)\n{\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    Int currLayer = video->currLayer;\n    Vol *currVol = video->vol[currLayer];\n\n    if (currVol->stream->bitstreamBuffer != video->overrunBuffer) // not used\n    {\n        return NULL;\n    }\n\n    return video->overrunBuffer;\n}\n\n\n\n\n/* ======================================================================== */\n/*  Function : EncodeVideoFrame()                                           */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Encode video frame and return bitstream                      */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*  02.14.2001                                      */\n/*              Finishing new timestamp 32-bit input                        */\n/*              Applications need to take care of wrap-around               */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVEncodeVideoFrame(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, VideoEncFrameIO *vid_out,\n                                        ULong *nextModTime, UChar *bstream, Int *size, Int *nLayer)\n{\n    Bool status = PV_TRUE;\n    PV_STATUS pv_status;\n    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;\n    VideoEncParams *encParams = video->encParams;\n    Vol *currVol;\n    Vop *tempForwRefVop = NULL;\n    Int tempRefSelCode = 0;\n    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);\n    Int width_16, height_16;\n    Int width, height;\n    Vop *temp;\n    Int encodeVop = 0;\n    void  PaddingEdge(Vop *padVop);\n    Int currLayer = -1;\n    //Int nLayers = encParams->nLayers;\n\n    ULong modTime = vid_in->timestamp;\n\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};\n    static Int rand_idx = 0;\n#endif\n\n    /*******************************************************/\n    /* Determine Next Vop to encode, if any, and nLayer    */\n    /*******************************************************/\n    //i = nLayers-1;\n\n    if (video->volInitialize[0]) /* first vol to code */\n    {\n        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % 1000));\n    }\n\n    encodeVop = DetermineCodingLayer(video, nLayer, modTime);\n    currLayer = *nLayer;\n    if ((currLayer < 0) || (currLayer > encParams->nLayers - 1))\n        return PV_FALSE;\n\n    /******************************************/\n    
/* If post-skipping still effective --- return */\n    /******************************************/\n\n    if (!encodeVop) /* skip enh layer, no base layer coded --- return */\n    {\n#ifdef _PRINT_STAT\n        printf(\"No frame coded. Continue to next frame.\");\n#endif\n        /* expected next code time, convert back to millisec */\n        *nextModTime = video->nextModTime;\n\n#ifdef ALLOW_VOP_NOT_CODED\n        if (video->vol[0]->shortVideoHeader) /* Short Video Header = 1 */\n        {\n            *size = 0;\n            *nLayer = -1;\n        }\n        else\n        {\n            *nLayer = 0;\n            EncodeVopNotCoded(video, bstream, size, modTime);\n            *size = video->vol[0]->stream->byteCount;\n        }\n#else\n        *size = 0;\n        *nLayer = -1;\n#endif\n        return status;\n    }\n\n\n//ENCODE_VOP_AGAIN:  /* 12/30/00 */\n\n    /**************************************************************/\n    /* Initialize Vol stream structure with application bitstream */\n    /**************************************************************/\n\n    currVol = video->vol[currLayer];\n    currVol->stream->bitstreamBuffer = bstream;\n    currVol->stream->bufferSize = *size;\n    BitstreamEncReset(currVol->stream);\n    BitstreamSetOverrunBuffer(currVol->stream, video->overrunBuffer, video->oBSize, video);\n\n    /***********************************************************/\n    /* Encode VOS and VOL Headers on first call for each layer */\n    /***********************************************************/\n\n    if (video->volInitialize[currLayer])\n    {\n        video->currVop->timeInc = 0;\n        video->prevBaseVop->timeInc = 0;\n        if (!video->encParams->GetVolHeader[currLayer])\n            pv_status = EncodeVOS_Start(encCtrl);\n    }\n\n    /***************************************************/\n    /* Copy Input Video Frame to Internal Video Buffer */\n    /***************************************************/\n    /* Determine Width 
and Height of Vop Layer */\n\n    width = encParams->LayerWidth[currLayer];   /* Get input width */\n    height = encParams->LayerHeight[currLayer]; /* Get input height */\n    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */\n\n    width_16 = ((width + 15) / 16) * 16;            /* Round up to nearest multiple of 16 */\n    height_16 = ((height + 15) / 16) * 16;          /* Round up to nearest multiple of 16 */\n\n    video->input = vid_in;  /* point to the frame input */\n\n    /*//  End ////////////////////////////// */\n\n\n    /**************************************/\n    /* Determine VOP Type                 */\n    /* 6/2/2001, separate function      */\n    /**************************************/\n    DetermineVopType(video, currLayer);\n\n    /****************************/\n    /*    Initialize VOP        */\n    /****************************/\n    video->currVop->volID = currVol->volID;\n    video->currVop->width = width_16;\n    video->currVop->height = height_16;\n    if (video->encParams->H263_Enabled) /*  11/28/05 */\n    {\n        video->currVop->pitch = width_16;\n    }\n    else\n    {\n        video->currVop->pitch = width_16 + 32;\n    }\n    video->currVop->timeInc = currVol->timeIncrement;\n    video->currVop->vopCoded = 1;\n    video->currVop->roundingType = 0;\n    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;\n\n    if (currLayer == 0\n#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */\n            || random_val[rand_idx] || video->volInitialize[currLayer]\n#endif\n       )\n    {\n        tempForwRefVop = video->forwardRefVop; /* keep initial state */\n        if (tempForwRefVop != NULL) tempRefSelCode = tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevBaseVop;\n        video->forwardRefVop->refSelectCode = 1;\n    }\n#ifdef RANDOM_REFSELCODE\n    else\n    {\n        tempForwRefVop = video->forwardRefVop; /* keep initial state */\n        if (tempForwRefVop != NULL) 
tempRefSelCode = tempForwRefVop->refSelectCode;\n\n        video->forwardRefVop = video->prevEnhanceVop;\n        video->forwardRefVop->refSelectCode = 0;\n    }\n    rand_idx++;\n    rand_idx %= 30;\n#endif\n\n    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;\n    video->currVop->gobNumber = 0;\n    video->currVop->gobFrameID = video->currVop->predictionType;\n    video->currVop->temporalRef = (modTime * 30 / 1001) % 256;\n\n    video->currVop->temporalInterval = 0;\n\n    if (video->currVop->predictionType == I_VOP)\n        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];\n    else\n        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];\n\n\n    /****************/\n    /* Encode Vop */\n    /****************/\n    video->slice_coding = 0;\n\n    pv_status = EncodeVop(video);\n#ifdef _PRINT_STAT\n    if (video->currVop->predictionType == I_VOP)\n        printf(\" I-VOP \");\n    else\n        printf(\" P-VOP (ref.%d)\", video->forwardRefVop->refSelectCode);\n#endif\n\n    /************************************/\n    /* Update Skip Next Frame           */\n    /************************************/\n    *nLayer = UpdateSkipNextFrame(video, nextModTime, size, pv_status);\n    if (*nLayer == -1) /* skip current frame */\n    {\n        /* make sure that pointers are restored to the previous state */\n        if (currLayer == 0)\n        {\n            video->forwardRefVop = tempForwRefVop; /* For P-Vop base only */\n            video->forwardRefVop->refSelectCode = tempRefSelCode;\n        }\n\n        return status;\n    }\n\n    /* If I-VOP was encoded, reset IntraPeriod */\n    if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))\n        video->nextEncIVop = encParams->IntraPeriod;\n\n    /* Set HintTrack Information */\n    if (currLayer != -1)\n    {\n        if (currVol->prevModuloTimeBase)\n            video->hintTrackInfo.MTB = 1;\n        else\n            
video->hintTrackInfo.MTB = 0;\n        video->hintTrackInfo.LayerID = (UChar)currVol->volID;\n        video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;\n        video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;\n    }\n\n    /************************************************/\n    /* Determine nLayer and timeInc for next encode */\n    /* 12/27/00 always go by the highest layer*/\n    /************************************************/\n\n    /**********************************************************/\n    /* Copy Reconstructed Buffer to Output Video Frame Buffer */\n    /**********************************************************/\n    vid_out->yChan = video->currVop->yChan;\n    vid_out->uChan = video->currVop->uChan;\n    vid_out->vChan = video->currVop->vChan;\n    if (video->encParams->H263_Enabled)\n    {\n        vid_out->height = video->currVop->height; /* padded height */\n        vid_out->pitch = video->currVop->width; /* padded width */\n    }\n    else\n    {\n        vid_out->height = video->currVop->height + 32; /* padded height */\n        vid_out->pitch = video->currVop->width + 32; /* padded width */\n    }\n    //video_out->timestamp = video->modTime;\n    vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);\n\n    /*// End /////////////////////// */\n\n    /***********************************/\n    /* Update Ouput bstream byte count */\n    /***********************************/\n\n    *size = currVol->stream->byteCount;\n\n    /****************************************/\n    /* Swap Vop Pointers for Base Layer     */\n    /****************************************/\n    if (currLayer == 0)\n    {\n        temp = video->prevBaseVop;\n        video->prevBaseVop = video->currVop;\n        video->prevBaseVop->padded = 0; /* not padded */\n        video->currVop  = temp;\n        video->forwardRefVop = video->prevBaseVop; /* For 
P-Vop base only */\n        video->forwardRefVop->refSelectCode = 1;\n    }\n    else\n    {\n        temp = video->prevEnhanceVop;\n        video->prevEnhanceVop = video->currVop;\n        video->prevEnhanceVop->padded = 0; /* not padded */\n        video->currVop = temp;\n        video->forwardRefVop = video->prevEnhanceVop;\n        video->forwardRefVop->refSelectCode = 0;\n    }\n\n    /****************************************/\n    /* Modify the intialize flag at the end.*/\n    /****************************************/\n    if (video->volInitialize[currLayer])\n        video->volInitialize[currLayer] = 0;\n\n    return status;\n}\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : PVEncodeFrameSet()                                           */\n/*  Date     : 04/18/2000                                                   */\n/*  Purpose  : Enter a video frame and perform front-end time check plus ME */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/*
 * First pass of slice-mode (two-pass) encoding: performs the frame-level
 * time check, selects the coding layer and VOP type, initializes the
 * current VOP, and runs EncodeVop() with slice_coding = 1 and no output
 * buffer attached (bufferSize = 0).  The bitstream itself is presumably
 * produced afterwards by repeated PVEncodeSlice() calls -- confirm with
 * callers.  On skip, *nLayer is set to -1 and *nextModTime receives the
 * next expected coding time in milliseconds.  Always returns PV_TRUE.
 */
OSCL_EXPORT_REF Bool PVEncodeFrameSet(VideoEncControls *encCtrl, VideoEncFrameIO *vid_in, ULong *nextModTime, Int *nLayer)
{
    Bool status = PV_TRUE;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);
    Int width_16, height_16;
    Int width, height;
    Int encodeVop = 0;
    void  PaddingEdge(Vop *padVop);
    Int currLayer = -1;
    //Int nLayers = encParams->nLayers;

    ULong   modTime = vid_in->timestamp;

#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
    Int random_val[30] = {0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0};
    static Int rand_idx = 0;
#endif
    /*******************************************************/
    /* Determine Next Vop to encode, if any, and nLayer    */
    /*******************************************************/

    video->modTime = modTime;

    //i = nLayers-1;

    if (video->volInitialize[0]) /* first vol to code */
    {
        /* reference time is the input timestamp rounded down to a second */
        video->nextModTime = video->modTimeRef = ((modTime) - ((modTime) % 1000));
    }


    encodeVop = DetermineCodingLayer(video, nLayer, modTime);

    currLayer = *nLayer;

    /******************************************/
    /* If post-skipping still effective --- return */
    /******************************************/

    if (!encodeVop) /* skip enh layer, no base layer coded --- return */
    {
#ifdef _PRINT_STAT
        printf("No frame coded. Continue to next frame.");
#endif
        *nLayer = -1;

        /* expected next code time, convert back to millisec */
        *nextModTime = video->nextModTime;;
        return status;
    }

    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/

    currVol = video->vol[currLayer];
    /* no output buffer in this pass: size 0, stream only reset */
    currVol->stream->bufferSize = 0;
    BitstreamEncReset(currVol->stream);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/

    if (video->volInitialize[currLayer])
    {
        video->currVop->timeInc = 0;
        video->prevBaseVop->timeInc = 0;
    }

    /***************************************************/
    /* Copy Input Video Frame to Internal Video Buffer */
    /***************************************************/
    /* Determine Width and Height of Vop Layer */

    width = encParams->LayerWidth[currLayer];   /* Get input width */
    height = encParams->LayerHeight[currLayer]; /* Get input height */
    /* Round Up to nearest multiple of 16 : MPEG-4 Standard */

    width_16 = ((width + 15) / 16) * 16;            /* Round up to nearest multiple of 16 */
    height_16 = ((height + 15) / 16) * 16;          /* Round up to nearest multiple of 16 */

    video->input = vid_in;  /* point to the frame input */

    /*//  End ////////////////////////////// */


    /**************************************/
    /* Determine VOP Type                 */
    /* 6/2/2001, separate function      */
    /**************************************/
    DetermineVopType(video, currLayer);

    /****************************/
    /*    Initialize VOP        */
    /****************************/
    video->currVop->volID = currVol->volID;
    video->currVop->width = width_16;
    video->currVop->height = height_16;
    if (video->encParams->H263_Enabled) /*  11/28/05 */
    {
        video->currVop->pitch = width_16;
    }
    else
    {
        /* MPEG-4 path pads 16 pixels on each side */
        video->currVop->pitch = width_16 + 32;
    }
    video->currVop->timeInc = currVol->timeIncrement;
    video->currVop->vopCoded = 1;
    video->currVop->roundingType = 0;
    video->currVop->intraDCVlcThr = encParams->IntraDCVlcThr;

    if (currLayer == 0
#ifdef RANDOM_REFSELCODE   /* add random selection of reference Vop */
            || random_val[rand_idx] || video->volInitialize[currLayer]
#endif
       )
    {
        /* saved in video-> (not locals) so PVEncodeSlice can restore on error */
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevBaseVop;
        video->forwardRefVop->refSelectCode = 1;
    }
#ifdef RANDOM_REFSELCODE
    else
    {
        video->tempForwRefVop = video->forwardRefVop; /* keep initial state */
        if (video->tempForwRefVop != NULL) video->tempRefSelCode = video->tempForwRefVop->refSelectCode;

        video->forwardRefVop = video->prevEnhanceVop;
        video->forwardRefVop->refSelectCode = 0;
    }
    rand_idx++;
    rand_idx %= 30;
#endif

    video->currVop->refSelectCode = video->forwardRefVop->refSelectCode;
    video->currVop->gobNumber = 0;
    video->currVop->gobFrameID = video->currVop->predictionType;
    /* 8-bit temporal reference at a nominal 29.97 fps clock */
    video->currVop->temporalRef = ((modTime) * 30 / 1001) % 256;

    video->currVop->temporalInterval = 0;

    if (video->currVop->predictionType == I_VOP)
        video->currVop->quantizer = encParams->InitQuantIvop[currLayer];
    else
        video->currVop->quantizer = encParams->InitQuantPvop[currLayer];

    /****************/
    /* Encode Vop   */
    /****************/
    video->slice_coding = 1;

    /*pv_status =*/
    EncodeVop(video);

#ifdef _PRINT_STAT
    if (video->currVop->predictionType == I_VOP)
        printf(" I-VOP ");
    else
        printf(" P-VOP (ref.%d)", video->forwardRefVop->refSelectCode);
#endif

    /* Set HintTrack Information */
    if (currVol->prevModuloTimeBase)
        video->hintTrackInfo.MTB = 1;
    else
        video->hintTrackInfo.MTB = 0;

    video->hintTrackInfo.LayerID = (UChar)currVol->volID;
    video->hintTrackInfo.CodeType = (UChar)video->currVop->predictionType;
    video->hintTrackInfo.RefSelCode = (UChar)video->currVop->refSelectCode;

    return status;
}
#endif /* NO_SLICE_ENCODE */

#ifndef NO_SLICE_ENCODE
/* ======================================================================== */
/*  Function : PVEncodePacket()                                             */
/*  Date     : 04/18/2002                                                   */
/*  Purpose  : Encode one packet and return bitstream                       */
/*  In/out   :                                                              */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/*
 * Encodes one slice/packet of the current frame into bstream and reports
 * the bytes produced in *size.  *endofFrame is set to 1 when the last MB
 * of the frame has been coded, -1 on error (reference pointers restored),
 * and 0 otherwise.  On frame completion the reconstructed frame is exposed
 * through vid_out and the current/previous VOP pointers are swapped.
 */
OSCL_EXPORT_REF Bool PVEncodeSlice(VideoEncControls *encCtrl, UChar *bstream, Int *size,
                                   Int *endofFrame, VideoEncFrameIO *vid_out, ULong *nextModTime)
{
    PV_STATUS pv_status;
    VideoEncData *video = (VideoEncData *)encCtrl->videoEncoderData;
    VideoEncParams *encParams = video->encParams;
    Vol *currVol;
    PV_STATUS   EncodeVOS_Start(VideoEncControls *encCtrl);
    Vop *temp;
    void  PaddingEdge(Vop *padVop);
    Int currLayer = video->currLayer;
    Int pre_skip;
    Int pre_size;
    /**************************************************************/
    /* Initialize Vol stream structure with application bitstream */
    /**************************************************************/

    currVol = video->vol[currLayer];
    currVol->stream->bitstreamBuffer = bstream;
    /* byteCount persists across slice calls; *size is the caller's budget
       for this call only */
    pre_size = currVol->stream->byteCount;
    currVol->stream->bufferSize = pre_size + (*size);

    /***********************************************************/
    /* Encode VOS and VOL Headers on first call for each layer */
    /***********************************************************/

    if (video->volInitialize[currLayer])
    {
        if (!video->encParams->GetVolHeader[currLayer])
            pv_status = EncodeVOS_Start(encCtrl);
    }

    /****************/
    /* Encode Slice */
    /****************/
    pv_status = EncodeSlice(video);

    *endofFrame = 0;

    /* all MBs coded and output buffer not exhausted -> frame is complete */
    if (video->mbnum >= currVol->nTotalMB && !video->end_of_buf)
    {
        *endofFrame = 1;

        /************************************/
        /* Update Skip Next Frame           */
        /************************************/
        pre_skip = UpdateSkipNextFrame(video, nextModTime, size, pv_status); /* modified such that no pre-skipped */

        if (pre_skip == -1) /* error */
        {
            *endofFrame = -1;
            /* make sure that pointers are restored to the previous state */
            if (currLayer == 0)
            {
                video->forwardRefVop = video->tempForwRefVop; /* For P-Vop base only */
                video->forwardRefVop->refSelectCode = video->tempRefSelCode;
            }

            return pv_status;
        }

        /* If I-VOP was encoded, reset IntraPeriod */
        if ((currLayer == 0) && (encParams->IntraPeriod > 0) && (video->currVop->predictionType == I_VOP))
            video->nextEncIVop = encParams->IntraPeriod;

        /**********************************************************/
        /* Copy Reconstructed Buffer to Output Video Frame Buffer */
        /**********************************************************/
        vid_out->yChan = video->currVop->yChan;
        vid_out->uChan = video->currVop->uChan;
        vid_out->vChan = video->currVop->vChan;
        if (video->encParams->H263_Enabled)
        {
            vid_out->height = video->currVop->height; /* padded height */
            vid_out->pitch = video->currVop->width; /* padded width */
        }
        else
        {
            vid_out->height = video->currVop->height + 32; /* padded height */
            vid_out->pitch = video->currVop->width + 32; /* padded width */
        }
        //vid_out->timestamp = video->modTime;
        /* reconstruct the display timestamp from the frame counter */
        vid_out->timestamp = (ULong)(((video->prevFrameNum[currLayer] * 1000) / encParams->LayerFrameRate[currLayer]) + video->modTimeRef + 0.5);

        /*// End /////////////////////// */

        /****************************************/
        /* Swap Vop Pointers for Base Layer     */
        /****************************************/

        if (currLayer == 0)
        {
            temp = video->prevBaseVop;
            video->prevBaseVop = video->currVop;
            video->prevBaseVop->padded = 0; /* not padded */
            video->currVop = temp;
            video->forwardRefVop = video->prevBaseVop; /* For P-Vop base only */
            video->forwardRefVop->refSelectCode = 1;
        }
        else
        {
            temp = video->prevEnhanceVop;
            video->prevEnhanceVop = video->currVop;
            video->prevEnhanceVop->padded = 0; /* not padded */
            video->currVop = temp;
            video->forwardRefVop = video->prevEnhanceVop;
            video->forwardRefVop->refSelectCode = 0;
        }
    }

    /***********************************/
    /* Update Output bstream byte count */
    /***********************************/

    *size = currVol->stream->byteCount - pre_size;

    /****************************************/
    /* Modify the initialize flag at the end.*/
    /****************************************/
    if (video->volInitialize[currLayer])
        video->volInitialize[currLayer] = 0;

    return pv_status;
}
#endif /* NO_SLICE_ENCODE */


/* ======================================================================== */
/*  Function : PVGetH263ProfileLevelID()                                    */
/*  Date     : 02/05/2003                                                   */
/*  Purpose  : Get H.263 Profile ID and level ID for profile 0              */
/*  In/out   : Profile ID=0, levelID is what we want                        */
/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*  Note     : h263Level[8], rBR_bound[8], max_h263_framerate[2]            */\n/*             max_h263_width[2], max_h263_height[2] are global             */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVGetH263ProfileLevelID(VideoEncControls *encCtrl, Int *profileID, Int *levelID)\n{\n    VideoEncData *encData;\n    Int width, height;\n    float bitrate_r, framerate;\n\n\n    /* For this version, we only support H.263 profile 0 */\n    *profileID = 0;\n\n    *levelID = 0;\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    if (!encData->encParams->H263_Enabled) return PV_FALSE;\n\n\n    /* get image width, height, bitrate and framerate */\n    width     = encData->encParams->LayerWidth[0];\n    height    = encData->encParams->LayerHeight[0];\n    bitrate_r = (float)(encData->encParams->LayerBitRate[0]) / (float)64000.0;\n    framerate = encData->encParams->LayerFrameRate[0];\n    if (!width || !height || !(bitrate_r > 0 && framerate > 0)) return PV_FALSE;\n\n    /* This is the most frequent case : level 10 */\n    if (bitrate_r <= rBR_bound[1] && framerate <= max_h263_framerate[0] &&\n            (width <= max_h263_width[0] && height <= max_h263_height[0]))\n    {\n        *levelID = h263Level[1];\n        return PV_TRUE;\n    }\n    else if (bitrate_r > rBR_bound[4] ||\n             (width > max_h263_width[1] || height > max_h263_height[1]) ||\n             framerate > max_h263_framerate[1])    /* check the highest level 70 */\n    {\n        *levelID = h263Level[7];\n        return PV_TRUE;\n    }\n    else   /* search level 20, 30, 40 */\n    {\n\n        /* pick out level 20 */\n        if (bitrate_r <= 
rBR_bound[2] &&\n                ((width <= max_h263_width[0] && height <= max_h263_height[0] && framerate <= max_h263_framerate[1]) ||\n                 (width <= max_h263_width[1] && height <= max_h263_height[1] && framerate <= max_h263_framerate[0])))\n        {\n            *levelID = h263Level[2];\n            return PV_TRUE;\n        }\n        else   /* width, height and framerate are ok, now choose level 30 or 40 */\n        {\n            *levelID = (bitrate_r <= rBR_bound[3] ? h263Level[3] : h263Level[4]);\n            return PV_TRUE;\n        }\n    }\n}\n\n/* ======================================================================== */\n/*  Function : PVGetMPEG4ProfileLevelID()                                   */\n/*  Date     : 26/06/2008                                                   */\n/*  Purpose  : Get MPEG4 Level after initialized                            */\n/*  In/out   : profile_level according to interface                         */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool PVGetMPEG4ProfileLevelID(VideoEncControls *encCtrl, Int *profile_level, Int nLayer)\n{\n    VideoEncData* video;\n    Int i;\n\n    video = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (nLayer == 0)\n    {\n        for (i = 0; i < MAX_BASE_PROFILE + 1; i++)\n        {\n            if (video->encParams->ProfileLevel[0] == profile_level_code[i])\n            {\n                break;\n            }\n        }\n        *profile_level = i;\n    }\n    else\n    {\n        for (i = 0; i < MAX_SCALABLE_PROFILE - MAX_BASE_PROFILE; i++)\n        {\n            if (video->encParams->ProfileLevel[1] == scalable_profile_level_code[i])\n            {\n                break;\n            }\n        }\n        *profile_level = i + MAX_BASE_PROFILE + 1;\n    }\n\n    return true;\n}\n\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateEncFrameRate                                         */\n/*  Date     : 04/08/2002                                                   */\n/*  Purpose  : Update target frame rates of the encoded base and enhance    */\n/*             layer(if any) while encoding operation is ongoing            */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateEncFrameRate(VideoEncControls *encCtrl, float *frameRate)\n{\n    VideoEncData    *encData;\n    Int i;// nTotalMB, mbPerSec;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Update the framerates for all the layers */\n    for (i = 0; i < encData->encParams->nLayers; i++)\n    {\n\n        /* New check: encoding framerate should be consistent with the given profile and level */\n        //nTotalMB = (((encData->encParams->LayerWidth[i]+15)/16)*16)*(((encData->encParams->LayerHeight[i]+15)/16)*16)/(16*16);\n        //mbPerSec = (Int)(nTotalMB * frameRate[i]);\n        //if(mbPerSec > encData->encParams->LayerMaxMbsPerSec[i]) return PV_FALSE;\n        if (frameRate[i] > encData->encParams->LayerMaxFrameRate[i]) return PV_FALSE; /* set by users or profile */\n\n        encData->encParams->LayerFrameRate[i] = frameRate[i];\n    }\n\n    RC_UpdateBXRCParams((void*) encData);\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateBitRate                                              */\n/*  Date     : 04/08/2002                                                   */\n/*  Purpose  : Update target bit rates of the encoded base and enhance      */\n/*             layer(if any) while encoding operation is ongoing            */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateBitRate(VideoEncControls *encCtrl, Int *bitRate)\n{\n    VideoEncData    *encData;\n    Int i;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Update the bitrates for all the layers */\n    for (i = 0; i < encData->encParams->nLayers; i++)\n    {\n        if (bitRate[i] > encData->encParams->LayerMaxBitRate[i]) /* set by users or profile */\n        {\n            return PV_FALSE;\n        }\n        encData->encParams->LayerBitRate[i] = bitRate[i];\n    }\n\n    RC_UpdateBXRCParams((void*) encData);\n    return PV_TRUE;\n\n}\n#endif\n#ifndef LIMITED_API\n/* ============================================================================ */\n/*  Function : PVUpdateVBVDelay()                                                   */\n/*  Date     : 4/23/2004                                                        */\n/*  Purpose  : Update VBV buffer size(in delay)                                 */\n/*  In/out   :                                                                  */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                        
*/\n/*  Modified :                                                                  */\n/*                                                                              */\n/* ============================================================================ */\n\nBool PVUpdateVBVDelay(VideoEncControls *encCtrl, float delay)\n{\n\n    VideoEncData    *encData;\n    Int total_bitrate, max_buffer_size;\n    int index;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    /* Check whether the input delay is valid based on the given profile */\n    total_bitrate   = (encData->encParams->nLayers == 1 ? encData->encParams->LayerBitRate[0] :\n                       encData->encParams->LayerBitRate[1]);\n    index = encData->encParams->profile_table_index;\n    max_buffer_size = (encData->encParams->nLayers == 1 ? profile_level_max_VBV_size[index] :\n                       scalable_profile_level_max_VBV_size[index]);\n\n    if (total_bitrate*delay > (float)max_buffer_size)\n        return PV_FALSE;\n\n    encData->encParams->VBV_delay = delay;\n    return PV_TRUE;\n\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVUpdateIFrameInterval()                                         */\n/*  Date     : 04/10/2002                                                   */\n/*  Purpose  : updates the INTRA frame refresh interval while encoding      */\n/*             is ongoing                                                   */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVUpdateIFrameInterval(VideoEncControls *encCtrl, Int aIFramePeriod)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    encData->encParams->IntraPeriod = aIFramePeriod;\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVSetNumIntraMBRefresh()                                     */\n/*  Date     : 08/05/2003                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nOSCL_EXPORT_REF Bool    PVUpdateNumIntraMBRefresh(VideoEncControls *encCtrl, Int numMB)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n\n    encData->encParams->Refresh = numMB;\n\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVIFrameRequest()                                            */\n/*  Date     : 04/10/2002                                                   */\n/*  Purpose  : encodes the next base frame as an I-Vop                      */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVIFrameRequest(VideoEncControls *encCtrl)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    encData->nextEncIVop = 1;\n    return PV_TRUE;\n}\n#endif\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVGetEncMemoryUsage()                                        */\n/*  Date     : 10/17/2000                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Int PVGetEncMemoryUsage(VideoEncControls *encCtrl)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n    return encData->encParams->MemoryUsage;\n}\n#endif\n\n/* ======================================================================== */\n/*  Function : PVGetHintTrack()                                             */\n/*  Date     : 1/17/2001,                                                   */\n/*  Purpose  :                                                              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetHintTrack(VideoEncControls *encCtrl, MP4HintTrack *info)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n    info->MTB = encData->hintTrackInfo.MTB;\n    info->LayerID = encData->hintTrackInfo.LayerID;\n    info->CodeType = encData->hintTrackInfo.CodeType;\n    info->RefSelCode = encData->hintTrackInfo.RefSelCode;\n\n    return PV_TRUE;\n}\n\n/* ======================================================================== */\n/*  Function : PVGetMaxVideoFrameSize()                                     */\n/*  Date     : 7/17/2001,                                                   */\n/*  Purpose  : Function merely returns the maximum buffer size              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetMaxVideoFrameSize(VideoEncControls *encCtrl, Int *maxVideoFrameSize)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n\n\n    *maxVideoFrameSize = encData->encParams->BufferSize[0];\n\n    if (encData->encParams->nLayers == 2)\n        if (*maxVideoFrameSize < encData->encParams->BufferSize[1])\n            *maxVideoFrameSize = encData->encParams->BufferSize[1];\n    *maxVideoFrameSize >>= 3;   /* Convert to Bytes */\n\n    if (*maxVideoFrameSize <= 4000)\n        *maxVideoFrameSize = 4000;\n\n    return PV_TRUE;\n}\n#ifndef LIMITED_API\n/* ======================================================================== */\n/*  Function : PVGetVBVSize()                                               */\n/*  Date     : 4/15/2002                                                    */\n/*  Purpose  : Function merely returns the maximum buffer size              */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nOSCL_EXPORT_REF Bool PVGetVBVSize(VideoEncControls *encCtrl, Int *VBVSize)\n{\n    VideoEncData    *encData;\n\n    encData = (VideoEncData *)encCtrl->videoEncoderData;\n\n    if (encData == NULL)\n        return PV_FALSE;\n    if (encData->encParams == NULL)\n        return PV_FALSE;\n\n    *VBVSize = encData->encParams->BufferSize[0];\n    if (encData->encParams->nLayers == 2)\n        *VBVSize += encData->encParams->BufferSize[1];\n\n    return PV_TRUE;\n\n}\n#endif\n/* ======================================================================== */\n/*  Function : EncodeVOS_Start()                                            */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Encodes the VOS,VO, and VOL or Short Headers                 */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Writes the VisualObjectSequence, VisualObject and VideoObjectLayer
 * headers for the current layer into video->bitstream1.  In short-header
 * (H.263-style) mode no MPEG-4 headers exist, so nothing is written. */
PV_STATUS EncodeVOS_Start(VideoEncControls *encoderControl)
{

    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;
    Vol         *currVol = video->vol[video->currLayer];
    PV_STATUS status = PV_SUCCESS;
    //int profile_level=0x01;
    BitstreamEncVideo *stream = video->bitstream1;
    int i, j;

    /********************************/
    /* Check for short_video_header */
    /********************************/
    /* Short-header bitstreams carry no VOS/VO/VOL headers at all. */
    if (currVol->shortVideoHeader == 1)
        return status;
    else
    {
        /* Short Video Header or M4V */

        /**************************/
        /* VisualObjectSequence ()*/
        /**************************/
        status = BitstreamPutGT16Bits(stream, 32, SESSION_START_CODE);
        /*  Determine profile_level */
        status = BitstreamPutBits(stream, 8, video->encParams->ProfileLevel[video->currLayer]);

        /******************/
        /* VisualObject() */
        /******************/

        status = BitstreamPutGT16Bits(stream, 32, VISUAL_OBJECT_START_CODE);
        status = BitstreamPut1Bits(stream, 0x00); /* visual object identifier */
        status = BitstreamPutBits(stream, 4, 0x01); /* visual object Type == "video ID" */
        status = BitstreamPut1Bits(stream, 0x00); /* no video signal type */

        /*temp   = */
        BitstreamMpeg4ByteAlignStuffing(stream);


        status = BitstreamPutGT16Bits(stream, 27, VO_START_CODE);/* byte align: should be 2 bits */
        status = BitstreamPutBits(stream, 5, 0x00);/*  Video ID = 0  */



        /**********************/
        /* VideoObjectLayer() */
        /**********************/
        /* NOTE(review): this test is always true here — the short-header
         * case already returned above. */
        if (currVol->shortVideoHeader == 0)
        { /* M4V  else Short Video Header */
            status = BitstreamPutGT16Bits(stream, VOL_START_CODE_LENGTH, VOL_START_CODE);
            status = BitstreamPutBits(stream, 4, currVol->volID);/*  video_object_layer_id */
            status = BitstreamPut1Bits(stream, 0x00);/*  Random Access = 0  */

            if (video->currLayer == 0)
                status = BitstreamPutBits(stream, 8, 0x01);/* Video Object Type Indication = 1  ... Simple Object Type */
            else
                status = BitstreamPutBits(stream, 8, 0x02);/* Video Object Type Indication = 2  ... Simple Scalable Object Type */

            status = BitstreamPut1Bits(stream, 0x00);/*  is_object_layer_identifer = 0 */


            status = BitstreamPutBits(stream, 4, 0x01); /* aspect_ratio_info = 1 ... 1:1(Square) */
            status = BitstreamPut1Bits(stream, 0x00);/* vol_control_parameters = 0 */
            status = BitstreamPutBits(stream, 2, 0x00);/* video_object_layer_shape = 00 ... rectangular */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 16, currVol->timeIncrementResolution);/* vop_time_increment_resolution */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPut1Bits(stream, currVol->fixedVopRate);/* fixed_vop_rate = 0 */

            /* For Rectangular VO layer shape */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->width);/* video_object_layer_width */
            status = BitstreamPut1Bits(stream, 0x01);/* marker bit */
            status = BitstreamPutGT8Bits(stream, 13, currVol->height);/* video_object_layer_height */
            status = BitstreamPut1Bits(stream, 0x01);/*marker bit */

            status = BitstreamPut1Bits(stream, 0x00);/*interlaced = 0 */
            status = BitstreamPut1Bits(stream, 0x01);/* obmc_disable = 1 */
            status = BitstreamPut1Bits(stream, 0x00);/* sprite_enable = 0 */
            status = BitstreamPut1Bits(stream, 0x00);/* not_8_bit = 0 */
            status = BitstreamPut1Bits(stream, currVol->quantType);/*   quant_type */

            if (currVol->quantType)
            {
                status = BitstreamPut1Bits(stream, currVol->loadIntraQuantMat); /* Intra quant matrix */
                if (currVol->loadIntraQuantMat)
                {
                    /* Write the intra quant matrix in zigzag order.  The
                     * scan backwards from index 63 finds where the trailing
                     * run of identical values begins; entries 0..j are
                     * written and, if the run was cut short, a single 0
                     * byte terminates the list. */
                    for (j = 63; j >= 1; j--)
                        if (currVol->iqmat[*(zigzag_i+j)] != currVol->iqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->iqmat[*(zigzag_i+j)] == currVol->iqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->iqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    /* No custom matrix supplied: fall back to the MPEG default. */
                    for (j = 0; j < 64; j++)
                        currVol->iqmat[j] = mpeg_iqmat_def[j];

                }
                status = BitstreamPut1Bits(stream, currVol->loadNonIntraQuantMat); /* Non-Intra quant matrix */
                if (currVol->loadNonIntraQuantMat)
                {
                    /* Same trailing-run trimming as the intra matrix above. */
                    for (j = 63; j >= 1; j--)
                        if (currVol->niqmat[*(zigzag_i+j)] != currVol->niqmat[*(zigzag_i+j-1)])
                            break;
                    if ((j == 1) && (currVol->niqmat[*(zigzag_i+j)] == currVol->niqmat[*(zigzag_i+j-1)]))
                        j = 0;
                    for (i = 0; i < j + 1; i++)
                        BitstreamPutBits(stream, 8, currVol->niqmat[*(zigzag_i+i)]);
                    if (j < 63)
                        BitstreamPutBits(stream, 8, 0);
                }
                else
                {
                    /* No custom matrix supplied: fall back to the MPEG default. */
                    for (j = 0; j < 64; j++)
                        currVol->niqmat[j] = mpeg_nqmat_def[j];
                }
            }

            status = BitstreamPut1Bits(stream, 0x01);   /* complexity_estimation_disable = 1 */
            status = BitstreamPut1Bits(stream, currVol->ResyncMarkerDisable);/* Resync_marker_disable */
            status = BitstreamPut1Bits(stream, currVol->dataPartitioning);/* Data partitioned */

            if (currVol->dataPartitioning)
                status = BitstreamPut1Bits(stream, currVol->useReverseVLC); /* Reversible_vlc */


            if (currVol->scalability) /* Scalability*/
            {

                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 1 */
                status = BitstreamPut1Bits(stream, currVol->scalType);/* hierarchy _type ... Spatial= 0 and Temporal = 1 */
                status = BitstreamPutBits(stream, 4, currVol->refVolID);/* ref_layer_id  */
                status = BitstreamPut1Bits(stream, currVol->refSampDir);/* ref_layer_sampling_direc*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_n);/*hor_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->horSamp_m);/*hor_sampling_factor_m*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_n);/*vert_sampling_factor_n*/
                status = BitstreamPutBits(stream, 5, currVol->verSamp_m);/*vert_sampling_factor_m*/
                status = BitstreamPut1Bits(stream, currVol->enhancementType);/* enhancement_type*/
            }
            else /* No Scalability */
                status = BitstreamPut1Bits(stream, currVol->scalability);/* Scalability = 0 */

            /*temp = */
            BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align Headers for VOP */
        }
    }

    return status;
}

/* ======================================================================== */
/*  Function : VOS_End()                          
                          */\n/*  Date     : 08/22/2000                                                   */\n/*  Purpose  : Visual Object Sequence End                                   */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nPV_STATUS VOS_End(VideoEncControls *encoderControl)\n{\n    PV_STATUS status = PV_SUCCESS;\n    VideoEncData *video = (VideoEncData *)encoderControl->videoEncoderData;\n    Vol         *currVol = video->vol[video->currLayer];\n    BitstreamEncVideo *stream = currVol->stream;\n\n\n    status = BitstreamPutBits(stream, SESSION_END_CODE, 32);\n\n    return status;\n}\n\n/* ======================================================================== */\n/*  Function : DetermineCodingLayer                                         */\n/*  Date     : 06/02/2001                                                   */\n/*  Purpose  : Find layer to code based on current mod time, assuming that\n               it's time to encode enhanced layer.                          */\n/*  In/out   :                                                              */\n/*  Return   : Number of layer to code.                                     
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Decides, from the wall-clock style timestamp modTime (ms), whether any
 * layer should be encoded now.  Returns 1 and sets *nLayer (and
 * video->currLayer) when a layer is due; returns 0 when this frame should
 * not be encoded.  Also updates per-layer frame counters, timeIncrement /
 * moduloTimeBase for the bitstream, and the rate-control buffer when
 * frames were dropped. */
Int DetermineCodingLayer(VideoEncData *video, Int *nLayer, ULong modTime)
{
    Vol **vol = video->vol;
    VideoEncParams *encParams = video->encParams;
    Int numLayers = encParams->nLayers;
    UInt modTimeRef = video->modTimeRef;
    float *LayerFrameRate = encParams->LayerFrameRate;
    UInt frameNum[4], frameTick;
    ULong frameModTime, nextFrmModTime;
#ifdef REDUCE_FRAME_VARIANCE    /* To limit how close 2 frames can be */
    float frameInterval;
#endif
    float srcFrameInterval;
    Int frameInc;
    Int i, extra_skip;
    Int encodeVop = 0;

    /* Start with the highest (enhancement) layer. */
    i = numLayers - 1;

    if (modTime - video->nextModTime > ((ULong)(-1)) >> 1) /* next time wrapped around */
        return 0; /* not time to code it yet */

    video->relLayerCodeTime[i] -= 1000;
    video->nextEncIVop--;  /* number of Vops in highest layer resolution. */
    video->numVopsInGOP++;

    /* from this point frameModTime and nextFrmModTime are internal */

    /* Nearest frame index of the top layer at modTime (rounded). */
    frameNum[i] = (UInt)((modTime - modTimeRef) * LayerFrameRate[i] + 500) / 1000;
    if (video->volInitialize[i])
    {
        video->prevFrameNum[i] = frameNum[i] - 1;
    }
    else if (frameNum[i] <= video->prevFrameNum[i])
    {
        return 0; /* do not encode this frame */
    }

    /**** this part computes expected next frame *******/
    frameModTime = (ULong)(((frameNum[i] * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */
    nextFrmModTime = (ULong)((((frameNum[i] + 1) * 1000) / LayerFrameRate[i]) + modTimeRef + 0.5); /* rec. time */

    srcFrameInterval = 1000 / video->FrameRate;

    video->nextModTime = nextFrmModTime - (ULong)(srcFrameInterval / 2.) - 1; /* between current and next frame */

#ifdef REDUCE_FRAME_VARIANCE    /* To limit how close 2 frames can be */
    /* NOTE(review): 'delta' is not declared anywhere in this function, so
     * this branch will not compile if REDUCE_FRAME_VARIANCE is defined —
     * confirm before enabling the macro. */
    frameInterval = 1000 / LayerFrameRate[i]; /* next rec. time */
    delta = (Int)(frameInterval / 4); /* empirical number */
    if (video->nextModTime - modTime  < (ULong)delta) /* need to move nextModTime further. */
    {
        video->nextModTime += ((delta - video->nextModTime + modTime)); /* empirical formula  */
    }
#endif
    /****************************************************/

    /* map frame no.to tick from modTimeRef */
    /*frameTick = (frameNum[i]*vol[i]->timeIncrementResolution) ;
    frameTick = (UInt)((frameTick + (encParams->LayerFrameRate[i]/2))/encParams->LayerFrameRate[i]);*/
    /*  11/16/01, change frameTick to be the closest tick from the actual modTime */
    /*  12/12/02, add (double) to prevent large number wrap-around */
    frameTick = (Int)(((double)(modTime - modTimeRef) * vol[i]->timeIncrementResolution + 500) / 1000);

    /* find timeIncrement to be put in the bitstream */
    /* refTick is second boundary reference. */
    vol[i]->timeIncrement = frameTick - video->refTick[i];


    /* Reduce timeIncrement modulo the resolution, counting how many whole
     * "seconds" (moduloTimeBase) elapsed. */
    vol[i]->moduloTimeBase = 0;
    while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
    {
        vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
        vol[i]->moduloTimeBase++;
        /* do not update refTick and modTimeRef yet, do it after encoding!! */
    }

    if (video->relLayerCodeTime[i] <= 0)    /* no skipping */
    {
        encodeVop = 1;
        video->currLayer = *nLayer = i;
        video->relLayerCodeTime[i] += 1000;

        /* takes care of more dropped frame than expected */
        extra_skip = -1;
        frameInc = (frameNum[i] - video->prevFrameNum[i]);
        extra_skip += frameInc;

        if (extra_skip > 0)
        {   /* update rc->Nr, rc->B, (rc->Rr)*/
            video->nextEncIVop -= extra_skip;
            video->numVopsInGOP += extra_skip;
            if (encParams->RC_Type != CONSTANT_Q)
            {
                RC_UpdateBuffer(video, i, extra_skip);
            }
        }

    }
    /* update frame no. */
    video->prevFrameNum[i] = frameNum[i];

    /* go through all lower layer */
    for (i = (numLayers - 2); i >= 0; i--)
    {

        video->relLayerCodeTime[i] -= 1000;

        /* find timeIncrement to be put in the bitstream */
        vol[i]->timeIncrement = frameTick - video->refTick[i];

        if (video->relLayerCodeTime[i] <= 0) /* time to encode base */
        {
            /* 12/27/00 */
            /* A lower layer overrides the higher layer's decision. */
            encodeVop = 1;
            video->currLayer = *nLayer = i;
            video->relLayerCodeTime[i] +=
                (Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]);

            vol[i]->moduloTimeBase = 0;
            while (vol[i]->timeIncrement >= vol[i]->timeIncrementResolution)
            {
                vol[i]->timeIncrement -= vol[i]->timeIncrementResolution;
                vol[i]->moduloTimeBase++;
                /* do not update refTick and modTimeRef yet, do it after encoding!! */
            }

            /* takes care of more dropped frame than expected */
            frameNum[i] = (UInt)((frameModTime - modTimeRef) * encParams->LayerFrameRate[i] + 500) / 1000;
            if (video->volInitialize[i])
                video->prevFrameNum[i] = frameNum[i] - 1;

            extra_skip = -1;
            frameInc = (frameNum[i] - video->prevFrameNum[i]);
            extra_skip += frameInc;

            if (extra_skip > 0)
            {   /* update rc->Nr, rc->B, (rc->Rr)*/
                if (encParams->RC_Type != CONSTANT_Q)
                {
                    RC_UpdateBuffer(video, i, extra_skip);
                }
            }
            /* update frame no. */
            video->prevFrameNum[i] = frameNum[i];
        }
    }

#ifdef _PRINT_STAT
    if (encodeVop)
        printf(" TI: %d ", vol[*nLayer]->timeIncrement);
#endif

    return encodeVop;
}

/* ======================================================================== */
/*  Function : DetermineVopType                                             */
/*  Date     : 06/02/2001                                                   */
/*  Purpose  : The name says it all.                                        */
/*  In/out   :                                                              */
/*  Return   : void .                                                       
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Chooses I_VOP or P_VOP for video->currVop based on encParams->IntraPeriod:
 *   0  -> all-intra on the base layer (enhancement layers stay P);
 *  -1  -> a single leading I-VOP, P thereafter;
 *  >0  -> an I-VOP every IntraPeriod base frames.
 * Also resets the GOP counters (numVopsInGOP, nextEncIVop) at each I-VOP. */
void DetermineVopType(VideoEncData *video, Int currLayer)
{
    VideoEncParams *encParams = video->encParams;
//  Vol *currVol = video->vol[currLayer];

    if (encParams->IntraPeriod == 0) /* I-VOPs only */
    {
        if (video->currLayer > 0)
            video->currVop->predictionType = P_VOP;
        else
        {
            video->currVop->predictionType = I_VOP;
            if (video->numVopsInGOP >= 132)
                video->numVopsInGOP = 0;
        }
    }
    else if (encParams->IntraPeriod == -1)  /* IPPPPP... */
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (video->currLayer == 0)
        {
            /* Only the very first base frame (VOL still initializing) is intra. */
            if (/*video->numVopsInGOP>=132 || */video->volInitialize[currLayer])
            {
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0; /* force INTRA update every 132 base frames*/
                video->nextEncIVop = 1;
            }
            else if (video->nextEncIVop == 0 || video->currVop->predictionType == I_VOP)
            {
                video->numVopsInGOP = 0;
                video->nextEncIVop = 1;
            }
        }
    }
    else   /* IntraPeriod>0 : IPPPPPIPPPPPI... */
    {

        /* maintain frame type if previous frame is pre-skipped, 06/02/2001 */
        if (encParams->RC_Type == CONSTANT_Q || video->rc[currLayer]->skip_next_frame != -1)
            video->currVop->predictionType = P_VOP;

        if (currLayer == 0)
        {
            /* Periodic refresh: restart the GOP once the countdown expires. */
            if (video->nextEncIVop <= 0 || video->currVop->predictionType == I_VOP)
            {
                video->nextEncIVop = encParams->IntraPeriod;
                video->currVop->predictionType = I_VOP;
                video->numVopsInGOP = 0;
            }
        }
    }

    return ;
}

/* ======================================================================== */
/*  Function : UpdateSkipNextFrame                                          */
/*  Date     : 06/02/2001                                                   */
/*  Purpose  : From rate control frame skipping decision, update timing
                related parameters.                                         */
/*  In/out   :                                                              */
/*  Return   : Current coded layer.                                         
*/
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */
/* Applies the rate controller's skip decision after a frame was coded.
 * Returns the layer that was actually coded, or -1 when the current frame
 * must be discarded (in which case *size is zeroed).  On the non-skip path
 * it advances refTick by the elapsed moduloTimeBase and periodically
 * renormalizes modTimeRef/refTick/prevFrameNum to prevent overflow.
 * *modTime is always set to the time the caller should next encode at. */
Int UpdateSkipNextFrame(VideoEncData *video, ULong *modTime, Int *size, PV_STATUS status)
{
    Int currLayer = video->currLayer;
    Int nLayer = currLayer;
    VideoEncParams *encParams = video->encParams;
    Int numLayers = encParams->nLayers;
    Vol *currVol = video->vol[currLayer];
    Vol **vol = video->vol;
    Int num_skip, extra_skip;
    Int i;
    UInt newRefTick, deltaModTime;
    UInt temp;

    if (encParams->RC_Type != CONSTANT_Q)
    {
        if (video->volInitialize[0] && currLayer == 0)  /* always encode the first frame */
        {
            RC_ResetSkipNextFrame(video, currLayer);
            //return currLayer;  09/15/05
        }
        else
        {
            /* Negative skip count (or a full buffer) means: drop the frame
             * that was just produced. */
            if (RC_GetSkipNextFrame(video, currLayer) < 0 || status == PV_END_OF_BUF)   /* Skip Current Frame */
            {

#ifdef _PRINT_STAT
                printf("Skip current frame");
#endif
                /* Roll back the time base consumed by the dropped frame. */
                currVol->moduloTimeBase = currVol->prevModuloTimeBase;

                /*********************/
                /* prepare to return */
                /*********************/
                *size = 0;  /* Set Bitstream buffer to zero */

                /* Determine nLayer and modTime for next encode */

                *modTime = video->nextModTime;
                nLayer = -1;

                return nLayer; /* return immediately without updating RefTick & modTimeRef */
                /* If I-VOP was attempted, then ensure next base is I-VOP */
                /*if((encParams->IntraPeriod>0) && (video->currVop->predictionType == I_VOP))
                video->nextEncIVop = 0; commented out by 06/05/01 */

            }
            else if ((num_skip = RC_GetSkipNextFrame(video, currLayer)) > 0)
            {

#ifdef _PRINT_STAT
                printf("Skip next %d frames", num_skip);
#endif
                /* to keep the Nr of enh layer the same */
                /* adjust relLayerCodeTime only, do not adjust layerCodeTime[numLayers-1] */
                extra_skip = 0;
                for (i = 0; i < currLayer; i++)
                {
                    if (video->relLayerCodeTime[i] <= 1000)
                    {
                        extra_skip = 1;
                        break;
                    }
                }

                /* Push the scheduled code time of this layer and all higher
                 * layers out by the skipped interval. */
                for (i = currLayer; i < numLayers; i++)
                {
                    video->relLayerCodeTime[i] += (num_skip + extra_skip) *
                                                  ((Int)((1000.0 * encParams->LayerFrameRate[numLayers-1]) / encParams->LayerFrameRate[i]));
                }
            }
        }/* first frame */
    }
    /*****  current frame is encoded, now update refTick ******/

    video->refTick[currLayer] += vol[currLayer]->prevModuloTimeBase * vol[currLayer]->timeIncrementResolution;

    /* Reset layerCodeTime every I-VOP to prevent overflow */
    if (currLayer == 0)
    {
        /*  12/12/02, fix for weird target frame rate of 9.99 fps or 3.33 fps */
        if (((encParams->IntraPeriod != 0) /*&& (video->currVop->predictionType==I_VOP)*/) ||
                ((encParams->IntraPeriod == 0) && (video->numVopsInGOP == 0)))
        {
            /* Smallest refTick over all layers is the amount we can safely
             * subtract from every layer. */
            newRefTick = video->refTick[0];

            for (i = 1; i < numLayers; i++)
            {
                if (video->refTick[i] < newRefTick)
                    newRefTick = video->refTick[i];
            }

            /* check to make sure that the update is integer multiple of frame number */
            /* how many msec elapsed from last modTimeRef */
            deltaModTime = (newRefTick / vol[0]->timeIncrementResolution) * 1000;

            /* Only renormalize when deltaModTime corresponds to a whole
             * number of frames in every layer; otherwise cancel (newRefTick=0). */
            for (i = numLayers - 1; i >= 0; i--)
            {
                temp = (UInt)(deltaModTime * encParams->LayerFrameRate[i]); /* 12/12/02 */
                if (temp % 1000)
                    newRefTick = 0;

            }
            if (newRefTick > 0)
            {
                video->modTimeRef += deltaModTime;
                for (i = numLayers - 1; i >= 0; i--)
                {
                    video->prevFrameNum[i] -= (UInt)(deltaModTime * encParams->LayerFrameRate[i]) / 1000;
                    video->refTick[i] -= newRefTick;
                }
            }
        }
    }

    *modTime =  video->nextModTime;

    return nLayer;
}


#ifndef ORIGINAL_VERSION

/* ======================================================================== */
/*  Function : SetProfile_BufferSize                                        */
/*  Date     : 04/08/2002                                                   */
/*  Purpose  : Set profile and video buffer size, copied from Jim's code    */
/*             in PVInitVideoEncoder(.), since we have different places     */
/*             to reset profile and video buffer size                       */
/*  In/out   :                                                              */
/*  Return   :                                                              */
/*  Modified :                                                              */
/*                                                                          */
/* ======================================================================== */

Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized)
{
    Int i, j, start, end;
//  Int BaseMBsPerSec = 0, EnhMBsPerSec = 0;
    Int nTotalMB = 0;
    Int idx, temp_w, temp_h, max = 0, max_width, max_height;

    Int nLayers = video->encParams->nLayers; /* Number of Layers to be encoded */

    Int total_bitrate = 0, base_bitrate;
    Int total_packet_size = 0, base_packet_size;
    Int 
total_MBsPerSec = 0, base_MBsPerSec;\n    Int total_VBV_size = 0, base_VBV_size, enhance_VBV_size = 0;\n    float total_framerate, base_framerate;\n    float upper_bound_ratio;\n    Int bFound = 0;\n    Int k = 0, width16, height16, index;\n    Int lowest_level;\n\n#define MIN_BUFF    16000 /* 16k minimum buffer size */\n#define BUFF_CONST  2.0    /* 2000ms */\n#define UPPER_BOUND_RATIO 8.54 /* upper_bound = 1.4*(1.1+bound/10)*bitrate/framerate */\n\n#define QCIF_WIDTH  176\n#define QCIF_HEIGHT 144\n\n    index = video->encParams->profile_table_index;\n\n    /* Calculate \"nTotalMB\" */\n    /* Find the maximum width*height for memory allocation of the VOPs */\n    for (idx = 0; idx < nLayers; idx++)\n    {\n        temp_w = video->encParams->LayerWidth[idx];\n        temp_h = video->encParams->LayerHeight[idx];\n\n        if ((temp_w*temp_h) > max)\n        {\n            max = temp_w * temp_h;\n            max_width = temp_w;\n            max_height = temp_h;\n            nTotalMB = ((max_width + 15) >> 4) * ((max_height + 15) >> 4);\n        }\n    }\n    upper_bound_ratio = (video->encParams->RC_Type == CBR_LOWDELAY ? 
(float)5.0 : (float)UPPER_BOUND_RATIO);\n\n\n    /* Get the basic information: bitrate, packet_size, MBs/s and VBV_size */\n    base_bitrate        = video->encParams->LayerBitRate[0];\n    if (video->encParams->LayerMaxBitRate[0] != 0) /* video->encParams->LayerMaxBitRate[0] == 0 means it has not been set */\n    {\n        base_bitrate    = PV_MAX(base_bitrate, video->encParams->LayerMaxBitRate[0]);\n    }\n    else /* if the max is not set, set it to the specified profile/level */\n    {\n        video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[index];\n    }\n\n    base_framerate      = video->encParams->LayerFrameRate[0];\n    if (video->encParams->LayerMaxFrameRate[0] != 0)\n    {\n        base_framerate  = PV_MAX(base_framerate, video->encParams->LayerMaxFrameRate[0]);\n    }\n    else /* if the max is not set, set it to the specified profile/level */\n    {\n        video->encParams->LayerMaxFrameRate[0] = (float)profile_level_max_mbsPerSec[index] / nTotalMB;\n    }\n\n    base_packet_size    = video->encParams->ResyncPacketsize;\n    base_MBsPerSec      = (Int)(base_framerate * nTotalMB);\n    base_VBV_size       = PV_MAX((Int)(base_bitrate * delay),\n                                 (Int)(upper_bound_ratio * base_bitrate / base_framerate));\n    base_VBV_size       = PV_MAX(base_VBV_size, MIN_BUFF);\n\n    /* if the buffer is larger than maximum buffer size, we'll clip it */\n    if (base_VBV_size > profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5])\n        base_VBV_size = profile_level_max_VBV_size[SIMPLE_PROFILE_LEVEL5];\n\n    /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */\n    if (nLayers == 1 && base_VBV_size > profile_level_max_VBV_size[index])\n        return FALSE;\n\n\n    if (nLayers == 2) /* check both enhanced and base layer */\n    {\n\n        total_bitrate       = video->encParams->LayerBitRate[1];\n        if (video->encParams->LayerMaxBitRate[1] != 0)\n        {\n            
total_bitrate   = PV_MIN(total_bitrate, video->encParams->LayerMaxBitRate[1]);\n        }\n        else /* if the max is not set, set it to the specified profile/level */\n        {\n            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[index];\n        }\n\n        total_framerate     = video->encParams->LayerFrameRate[1];\n        if (video->encParams->LayerMaxFrameRate[1] != 0)\n        {\n            total_framerate     = PV_MIN(total_framerate, video->encParams->LayerMaxFrameRate[1]);\n        }\n        else /* if the max is not set, set it to the specified profile/level */\n        {\n            video->encParams->LayerMaxFrameRate[1] = (float)scalable_profile_level_max_mbsPerSec[index] / nTotalMB;\n        }\n\n        total_packet_size   = video->encParams->ResyncPacketsize;\n        total_MBsPerSec     = (Int)(total_framerate * nTotalMB);\n\n        enhance_VBV_size    = PV_MAX((Int)((total_bitrate - base_bitrate) * delay),\n                                     (Int)(upper_bound_ratio * (total_bitrate - base_bitrate) / (total_framerate - base_framerate)));\n        enhance_VBV_size    = PV_MAX(enhance_VBV_size, MIN_BUFF);\n\n        total_VBV_size      = base_VBV_size + enhance_VBV_size;\n\n        /* if the buffer is larger than maximum buffer size, we'll clip it */\n        if (total_VBV_size > scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1])\n        {\n            total_VBV_size = scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL3 - MAX_BASE_PROFILE - 1];\n            enhance_VBV_size = total_VBV_size - base_VBV_size;\n        }\n\n        /* Check if the buffer exceeds the maximum buffer size given the maximum profile and level */\n        if (total_VBV_size > scalable_profile_level_max_VBV_size[index])\n            return FALSE;\n    }\n\n\n    if (!bInitialized) /* Has been initialized --> profile @ level has been figured out! 
*/\n    {\n        video->encParams->BufferSize[0] = base_VBV_size;\n        if (nLayers > 1)\n            video->encParams->BufferSize[1] = enhance_VBV_size;\n\n        return PV_TRUE;\n    }\n\n\n    /* Profile @ level determination */\n    if (nLayers == 1)\n    {\n        /* check other parameters */\n        /* BASE ONLY : Simple Profile(SP) Or Core Profile(CP) */\n        if (base_bitrate     > profile_level_max_bitrate[index]     ||\n                base_packet_size > profile_level_max_packet_size[index] ||\n                base_MBsPerSec   > profile_level_max_mbsPerSec[index]   ||\n                base_VBV_size    > profile_level_max_VBV_size[index])\n\n            return PV_FALSE; /* Beyond the bound of Core Profile @ Level2 */\n\n        /* For H263/Short header, determine k*16384 */\n        /* This part only applies to Short header mode, but not H.263 */\n        width16  = ((video->encParams->LayerWidth[0] + 15) >> 4) << 4;\n        height16 = ((video->encParams->LayerHeight[0] + 15) >> 4) << 4;\n        if (video->encParams->H263_Enabled)\n        {\n            k = 4;\n            if (width16  == 2*QCIF_WIDTH && height16 == 2*QCIF_HEIGHT)  /* CIF */\n                k = 16;\n\n            else if (width16  == 4*QCIF_WIDTH && height16 == 4*QCIF_HEIGHT)  /* 4CIF */\n                k = 32;\n\n            else if (width16  == 8*QCIF_WIDTH && height16 == 8*QCIF_HEIGHT)  /* 16CIF */\n                k = 64;\n\n            video->encParams->maxFrameSize  = k * 16384;\n\n            /* Make sure the buffer size is limited to the top profile and level: the SPL5 */\n            if (base_VBV_size > (Int)(k*16384 + 4*(float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5]*1001.0 / 30000.0))\n                base_VBV_size = (Int)(k * 16384 + 4 * (float)profile_level_max_bitrate[SIMPLE_PROFILE_LEVEL5] * 1001.0 / 30000.0);\n\n            if (base_VBV_size > (Int)(k*16384 + 4*(float)profile_level_max_bitrate[index]*1001.0 / 30000.0))\n                return 
PV_FALSE;\n        }\n\n        /* Search the appropriate profile@level index */\n        if (!video->encParams->H263_Enabled &&\n                (video->encParams->IntraDCVlcThr != 0 || video->encParams->SearchRange > 16))\n        {\n            lowest_level = SIMPLE_PROFILE_LEVEL1; /* cannot allow SPL0 */\n        }\n        else\n        {\n            lowest_level = SIMPLE_PROFILE_LEVEL0; /* SPL0 */\n        }\n\n        for (i = lowest_level; i <= index; i++)\n        {\n            /* Since CPL1 is smaller than SPL4A, SPL5, this search favors Simple Profile.  */\n\n            if (base_bitrate     <= profile_level_max_bitrate[i]     &&\n                    base_packet_size <= profile_level_max_packet_size[i] &&\n                    base_MBsPerSec   <= profile_level_max_mbsPerSec[i]   &&\n                    base_VBV_size    <= (video->encParams->H263_Enabled ? (Int)(k*16384 + 4*(float)profile_level_max_bitrate[i]*1001.0 / 30000.0) :\n                                         profile_level_max_VBV_size[i]))\n                break;\n        }\n        if (i > index) return PV_FALSE; /* Nothing found!! 
*/\n\n        /* Found out the actual profile @ level : index \"i\" */\n        if (i == 0)\n        {\n            /* For Simple Profile @ Level 0, we need to do one more check: image size <= QCIF */\n            if (width16 > QCIF_WIDTH || height16 > QCIF_HEIGHT)\n                i = 1; /* image size > QCIF, then set SP level1 */\n        }\n\n        video->encParams->ProfileLevel[0] = profile_level_code[i];\n        video->encParams->BufferSize[0]   = base_VBV_size;\n\n        if (video->encParams->LayerMaxBitRate[0] == 0)\n            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[i];\n\n        if (video->encParams->LayerMaxFrameRate[0] == 0)\n            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[i] / nTotalMB);\n\n        /* For H263/Short header, one special constraint for VBV buffer size */\n        if (video->encParams->H263_Enabled)\n            video->encParams->BufferSize[0] = (Int)(k * 16384 + 4 * (float)profile_level_max_bitrate[i] * 1001.0 / 30000.0);\n\n    }\n    else\n    {\n        /* SCALABALE MODE: Simple Scalable Profile(SSP) Or Core Scalable Profile(CSP) */\n\n        if (total_bitrate       > scalable_profile_level_max_bitrate[index]     ||\n                total_packet_size   > scalable_profile_level_max_packet_size[index] ||\n                total_MBsPerSec     > scalable_profile_level_max_mbsPerSec[index]   ||\n                total_VBV_size      > scalable_profile_level_max_VBV_size[index])\n\n            return PV_FALSE; /* Beyond given profile and level */\n\n        /* One-time check: Simple Scalable Profile or Core Scalable Profile */\n        if (total_bitrate       <= scalable_profile_level_max_bitrate[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]        &&\n                total_packet_size   <= scalable_profile_level_max_packet_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]    &&\n                total_MBsPerSec     <= 
scalable_profile_level_max_mbsPerSec[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1]      &&\n                total_VBV_size      <= scalable_profile_level_max_VBV_size[CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1])\n\n        {\n            start = 0;\n            end = index;\n        }\n\n        else\n        {\n            start = CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1;\n            end = index;\n        }\n\n\n        /* Search the scalable profile */\n        for (i = start; i <= end; i++)\n        {\n            if (total_bitrate       <= scalable_profile_level_max_bitrate[i]     &&\n                    total_packet_size   <= scalable_profile_level_max_packet_size[i] &&\n                    total_MBsPerSec     <= scalable_profile_level_max_mbsPerSec[i]   &&\n                    total_VBV_size      <= scalable_profile_level_max_VBV_size[i])\n\n                break;\n        }\n        if (i > end) return PV_FALSE;\n\n        /* Search for matching base profile */\n        if (i == 0)\n        {\n            j = 0;\n            bFound = 1;\n        }\n        else        bFound = 0;\n\n        if (i >= CORE_SCALABLE_PROFILE_LEVEL1 - MAX_BASE_PROFILE - 1)\n        {\n            start = CORE_PROFILE_LEVEL1;  /* range for CORE PROFILE  */\n            end = CORE_PROFILE_LEVEL2;\n        }\n        else\n        {\n            start = SIMPLE_PROFILE_LEVEL0;  /* range for SIMPLE PROFILE */\n            end = SIMPLE_PROFILE_LEVEL5;\n        }\n\n        for (j = start; !bFound && j <= end; j++)\n        {\n            if (base_bitrate        <= profile_level_max_bitrate[j]      &&\n                    base_packet_size    <= profile_level_max_packet_size[j]  &&\n                    base_MBsPerSec      <= profile_level_max_mbsPerSec[j]    &&\n                    base_VBV_size       <= profile_level_max_VBV_size[j])\n\n            {\n                bFound = 1;\n                break;\n            }\n        }\n\n        if (!bFound) // && 
start == 4)\n            return PV_FALSE; /* mis-match in the profiles between base layer and enhancement layer */\n\n        /* j for base layer, i for enhancement layer */\n        video->encParams->ProfileLevel[0] = profile_level_code[j];\n        video->encParams->ProfileLevel[1] = scalable_profile_level_code[i];\n        video->encParams->BufferSize[0]   = base_VBV_size;\n        video->encParams->BufferSize[1]   = enhance_VBV_size;\n\n        if (video->encParams->LayerMaxBitRate[0] == 0)\n            video->encParams->LayerMaxBitRate[0] = profile_level_max_bitrate[j];\n\n        if (video->encParams->LayerMaxBitRate[1] == 0)\n            video->encParams->LayerMaxBitRate[1] = scalable_profile_level_max_bitrate[i];\n\n        if (video->encParams->LayerMaxFrameRate[0] == 0)\n            video->encParams->LayerMaxFrameRate[0] = PV_MIN(30, (float)profile_level_max_mbsPerSec[j] / nTotalMB);\n\n        if (video->encParams->LayerMaxFrameRate[1] == 0)\n            video->encParams->LayerMaxFrameRate[1] = PV_MIN(30, (float)scalable_profile_level_max_mbsPerSec[i] / nTotalMB);\n\n\n    } /* end of: if(nLayers == 1) */\n\n\n    if (!video->encParams->H263_Enabled && (video->encParams->ProfileLevel[0] == 0x08)) /* SPL0 restriction*/\n    {\n        /* PV only allow frame-based rate control, no QP change from one MB to another\n        if(video->encParams->ACDCPrediction == TRUE && MB-based rate control)\n         return PV_FALSE */\n    }\n\n    return PV_TRUE;\n}\n\n#endif /* #ifndef ORIGINAL_VERSION */\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/mp4enc_lib.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _MP4ENC_LIB_H_\n#define _MP4ENC_LIB_H_\n\n#include \"mp4def.h\"     // typedef\n#include \"mp4lib_int.h\" // main video structure\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    /* defined in vop.c */\n    PV_STATUS EncodeVop(VideoEncData *video);\n    PV_STATUS EncodeSlice(VideoEncData *video);\n    PV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number,\n                                      int quant_scale, Int insert);\n#ifdef ALLOW_VOP_NOT_CODED\n    PV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime);\n#endif\n\n    /* defined in combined_decode.c */\n    PV_STATUS EncodeFrameCombinedMode(VideoEncData *video);\n    PV_STATUS EncodeSliceCombinedMode(VideoEncData *video);\n\n    /* defined in datapart_decode.c */\n    PV_STATUS EncodeFrameDataPartMode(VideoEncData *video);\n    PV_STATUS EncodeSliceDataPartMode(VideoEncData *video);\n\n    /* defined in fastcodeMB.c */\n\n//void m4v_memset(void *adr_dst, uint8 value, uint32 size);\n\n    PV_STATUS CodeMB_H263(VideoEncData *video, approxDCT *function, Int offsetQP, Int ncoefblck[]);\n#ifndef NO_MPEG_QUANT\n    PV_STATUS CodeMB_MPEG(VideoEncData *video, approxDCT *function, Int 
offsetQP, Int ncoefblck[]);\n#endif\n    Int getBlockSAV(Short block[]);\n    Int Sad8x8(UChar *rec, UChar *prev, Int lx);\n    Int getBlockSum(UChar *rec, Int lx);\n\n    /* defined in dct.c */\n    void  blockIdct(Short *block);\n    void blockIdct_SSE(Short *input);\n    void BlockDCTEnc(Short *blockData, Short *blockCoeff);\n\n    /*---- FastQuant.c -----*/\n    Int cal_dc_scalerENC(Int QP, Int type) ;\n    Int BlockQuantDequantH263Inter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                   UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                                   Int dctMode, Int comp, Int dummy, UChar shortHeader);\n\n    Int BlockQuantDequantH263Intra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                   UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                                   Int dctMode, Int comp, Int dc_scaler, UChar shortHeader);\n\n    Int BlockQuantDequantH263DCInter(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                     UChar *bitmaprow, UInt *bitmapzz, Int dummy, UChar shortHeader);\n\n    Int BlockQuantDequantH263DCIntra(Short *rcoeff, Short *qcoeff, struct QPstruct *QuantParam,\n                                     UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler, UChar shortHeader);\n\n#ifndef NO_MPEG_QUANT\n    Int BlockQuantDequantMPEGInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                                   UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                                   Int DctMode, Int comp, Int dc_scaler);\n\n    Int BlockQuantDequantMPEGIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                                   UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz,\n                                   Int DctMode, Int comp, Int dc_scaler);\n\n    Int BlockQuantDequantMPEGDCInter(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n         
                            UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dummy);\n\n    Int BlockQuantDequantMPEGDCIntra(Short *rcoeff, Short *qcoeff, Int QP, Int *qmat,\n                                     UChar bitmapcol[ ], UChar *bitmaprow, UInt *bitmapzz, Int dc_scaler);\n#endif\n\n    /*---- FastIDCT.c -----*/\n    void BlockIDCTMotionComp(Short *block, UChar *bitmapcol, UChar bitmaprow,\n                             Int dctMode, UChar *rec, UChar *prev, Int lx_intra_zeroMV);\n\n\n    /* defined in motion_comp.c */\n    void getMotionCompensatedMB(VideoEncData *video, Int ind_x, Int ind_y, Int offset);\n    void EncPrediction_INTER(Int xpred, Int ypred, UChar *c_prev, UChar *c_rec,\n                             Int width, Int round1);\n\n    void EncPrediction_INTER4V(Int xpred, Int ypred, MOT *mot, UChar *c_prev, UChar *c_rec,\n                               Int width, Int round1);\n\n    void EncPrediction_Chrom(Int xpred, Int ypred, UChar *cu_prev, UChar *cv_prev, UChar *cu_rec,\n                             UChar *cv_rec, Int pitch_uv, Int width_uv, Int height_uv, Int round1);\n\n    void get_MB(UChar *c_prev, UChar *c_prev_u  , UChar *c_prev_v,\n                Short mb[6][64], Int width, Int width_uv);\n\n    void PutSkippedBlock(UChar *rec, UChar *prev, Int lx);\n\n    /* defined in motion_est.c */\n    void MotionEstimation(VideoEncData *video);\n#ifdef HTFM\n    void InitHTFM(VideoEncData *video, HTFM_Stat *htfm_stat, double *newvar, Int *collect);\n    void UpdateHTFM(VideoEncData *video, double *newvar, double *exp_lamda, HTFM_Stat *htfm_stat);\n#endif\n\n    /* defined in ME_utils.c */\n    void ChooseMode_C(UChar *Mode, UChar *cur, Int lx, Int min_SAD);\n    void ChooseMode_MMX(UChar *Mode, UChar *cur, Int lx, Int min_SAD);\n    void GetHalfPelMBRegion_C(UChar *cand, UChar *hmem, Int lx);\n    void GetHalfPelMBRegion_SSE(UChar *cand, UChar *hmem, Int lx);\n    void GetHalfPelBlkRegion(UChar *cand, UChar *hmem, Int lx);\n    void 
PaddingEdge(Vop *padVop);\n    void ComputeMBSum_C(UChar *cur, Int lx, MOT *mot_mb);\n    void ComputeMBSum_MMX(UChar *cur, Int lx, MOT *mot_mb);\n    void ComputeMBSum_SSE(UChar *cur, Int lx, MOT *mot_mb);\n    void GetHalfPelMBRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);\n    void GetHalfPelBlkRegionPadding(UChar *ncand, UChar *hmem, Int lx, Int *reptl);\n\n    /* defined in findhalfpel.c */\n    void FindHalfPelMB(VideoEncData *video, UChar *cur, MOT *mot, UChar *ncand,\n                       Int xpos, Int ypos, Int *xhmin, Int *yhmin, Int hp_guess);\n    Int  FindHalfPelBlk(VideoEncData *video, UChar *cur, MOT *mot, Int sad16, UChar *ncand8[],\n                        UChar *mode, Int xpos, Int ypos, Int *xhmin, Int *yhmin, UChar *hp_mem);\n\n\n    /* defined in sad.c */\n    Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info);\n    Int SAD_Blk_HalfPel_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int SAD_Blk_HalfPel_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_Macroblock_MMX(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_Macroblock_SSE(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int SAD_Block_MMX(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int SAD_Block_SSE(UChar *ref, UChar *blk, Int dmin, Int lx, 
void *extra_info);\n\n#ifdef HTFM /* Hypothesis Testing Fast Matching */\n    Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);\n    Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);\n    Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_x, void *extra_info);\n    Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n#endif\n    /* on-the-fly padding */\n    Int SAD_Blk_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);\n    Int SAD_MB_PADDING(UChar *ref, UChar *cur, Int dmin, Int lx, void *extra_info);\n#ifdef HTFM\n    Int SAD_MB_PADDING_HTFM_Collect(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int SAD_MB_PADDING_HTFM(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n#endif\n\n    /* defined in rate_control.c */\n    /* These are APIs to rate control exposed to core encoder module. 
*/\n    PV_STATUS RC_Initialize(void *video);\n    PV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *rc[]);\n    PV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc);\n    PV_STATUS RC_MBQPSetting(VideoEncData *video, rateControl *rc, Int start_packet_header);\n    PV_STATUS RC_MBUpdateStat(VideoEncData *video, rateControl *rc, Int Bi, Int Hi);\n    PV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers);\n\n    Int       RC_GetSkipNextFrame(VideoEncData *video, Int currLayer);\n    Int       RC_GetRemainingVops(VideoEncData *video, Int currLayer);\n    void      RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer);\n    PV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip);\n    PV_STATUS RC_UpdateBXRCParams(void *input);\n\n\n    /* defined in vlc_encode.c */\n    void MBVlcEncodeDataPar_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);\n    void MBVlcEncodeDataPar_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);\n    void MBVlcEncodeCombined_I_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);\n    void MBVlcEncodeCombined_P_VOP(VideoEncData *video, Int ncoefblck[], void *blkCodePtr);\n    void BlockCodeCoeff_ShortHeader(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);\n    void BlockCodeCoeff_RVLC(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);\n    void BlockCodeCoeff_Normal(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* _MP4ENC_LIB_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/mp4lib_int.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n\n#ifndef _MP4LIB_INT_H_\n#define _MP4LIB_INT_H_\n\n#include \"oscl_types.h\"\n\n#include \"mp4def.h\"\n#include \"mp4enc_api.h\"\n#include \"rate_control.h\"\n\n/* BitstreamEncVideo will be modified */\ntypedef struct tagBitstream\n{\n    Int(*writeVideoPacket)(UChar *buf, Int nbytes_required);   /*write video packet out */\n    UChar *bitstreamBuffer; /*buffer to hold one video packet*/\n    Int bufferSize; /*total bitstream buffer size in bytes */\n    Int byteCount;  /*how many bytes already encoded*/\n    UInt word;      /*hold one word temporarily */\n    Int bitLeft;    /*number of bits left in \"word\" */\n    UChar* overrunBuffer;  /* pointer to overrun buffer */\n    Int oBSize;     /* length of overrun buffer */\n    struct tagVideoEncData *video;\n} BitstreamEncVideo;\n\ntypedef struct tagVOP\n{\n    PIXEL   *yChan;             /* The Y component */\n    PIXEL   *uChan;             /* The U component */\n    PIXEL   *vChan;             /* The V component */\n    Int     frame;              /* frame number */\n    Int     volID;              /* Layer number */\n    //Int       timeStamp;          /* Vop TimeStamp in msec */\n\n    /* Syntax elements copied from VOL (standard) */\n    
Int     width;              /* Width (multiple of 16) */\n    Int     height;             /* Height (multiple of 16) */\n    Int     pitch;              /* Pitch (differs from width for UMV case) */\n    Int     padded;     /* flag whether this frame has been padded */\n\n    /* Actual syntax elements for VOP (standard) */\n    Int     predictionType;     /* VOP prediction type */\n    Int     timeInc;            /* VOP time increment (relative to last mtb) */\n    Int     vopCoded;\n    Int     roundingType;\n    Int     intraDCVlcThr;\n    Int     quantizer;          /* VOP quantizer */\n    Int     fcodeForward;       /* VOP dynamic range of motion vectors */\n    Int     fcodeBackward;      /* VOP dynamic range of motion vectors */\n    Int     refSelectCode;      /* enhancement layer reference select code */\n\n    /* H.263 parameters */\n    Int     gobNumber;\n    Int     gobFrameID;\n    Int     temporalRef;        /* temporal reference, roll over at 256 */\n    Int     temporalInterval;   /* increase every 256 temporalRef */\n\n} Vop;\n\ntypedef struct tagVol\n{\n    Int     volID;              /* VOL identifier (for tracking) */\n    Int     shortVideoHeader;   /* shortVideoHeader mode */\n    Int     GOVStart;           /* Insert GOV Header */\n    Int     timeIncrementResolution;    /* VOL time increment */\n    Int     nbitsTimeIncRes;    /* number of bits for time increment */\n    Int     timeIncrement;      /* time increment */\n    Int     moduloTimeBase;     /* internal decoder clock */\n    Int     prevModuloTimeBase; /* in case of pre-frameskip */\n\n    Int     fixedVopRate;\n    BitstreamEncVideo  *stream; /* library bitstream buffer (input buffer) */\n\n    /* VOL Dimensions */\n    Int     width;              /* Width */\n    Int     height;             /* Height */\n\n    /* Error Resilience Flags */\n    Int     ResyncMarkerDisable; /* VOL Disable Resynch Markers */\n    Int     useReverseVLC;      /* VOL reversible VLCs */\n    Int     
dataPartitioning;   /* VOL data partitioning */\n\n    /* Quantization related parameters */\n    Int     quantPrecision;     /* Quantizer precision */\n    Int     quantType;          /* MPEG-4 or H.263 Quantization Type */\n\n    /* Added loaded quant mat, 05/22/2000 */\n    Int     loadIntraQuantMat;      /* Load intra quantization matrix */\n    Int     loadNonIntraQuantMat;   /* Load nonintra quantization matrix */\n    Int     iqmat[64];          /* Intra quant.matrix */\n    Int     niqmat[64];         /* Non-intra quant.matrix */\n\n\n    /* Parameters used for scalability */\n    Int     scalability;        /* VOL scalability (flag) */\n    Int     scalType;           /* temporal = 0, spatial = 1, both = 2 */\n\n    Int     refVolID;           /* VOL id of reference VOL */\n    Int     refSampDir;         /* VOL resol. of ref. VOL */\n    Int     horSamp_n;          /* VOL hor. resampling of ref. VOL given by */\n    Int     horSamp_m;          /* sampfac = hor_samp_n/hor_samp_m      */\n    Int     verSamp_n;          /* VOL ver. resampling of ref. VOL given by */\n    Int     verSamp_m;          /* sampfac = ver_samp_n/ver_samp_m      */\n    Int     enhancementType;    /* VOL type of enhancement layer */\n\n    /* These variables were added since they are used a lot. */\n    Int     nMBPerRow, nMBPerCol;   /* number of MBs in each row & column    */\n    Int     nTotalMB;\n    Int     nBitsForMBID;           /* how many bits required for MB number? 
*/\n\n    /* for short video header */\n    Int     nMBinGOB;           /* number of MBs in GOB, 05/22/00 */\n    Int     nGOBinVop;          /* number of GOB in Vop  05/22/00 */\n} Vol;\n\ntypedef struct tagMacroBlock\n{\n    Int     mb_x;               /* X coordinate */\n    Int     mb_y;               /* Y coordinate */\n    Short   block[9][64];       /* 4-Y, U and V blocks , and AAN Scale*/\n} MacroBlock;\n\ntypedef struct tagRunLevelBlock\n{\n    Int run[64];        /* Runlength */\n    Int level[64];      /* Abs(level) */\n    Int s[64];          /* sign level */\n} RunLevelBlock;\n\ntypedef struct tagHeaderInfoDecVideo\n{\n    UChar       *Mode;              /* Modes INTRA/INTER/etc. */\n    UChar       *CBP;               /* MCBPC/CBPY stuff */\n} HeaderInfoEncVideo;\n\ntypedef Short typeDCStore[6];   /* ACDC */\ntypedef Short typeDCACStore[4][8];\n\ntypedef struct tagMOT\n{\n    Int x;  /* half-pel resolution x component */\n    Int y;      /* half-pel resolution y component */\n    Int sad;  /* SAD */\n} MOT;\n\ntypedef struct tagHintTrackInfo\n{\n    UChar MTB;\n    UChar LayerID;\n    UChar CodeType;\n    UChar RefSelCode;\n\n} HintTrackInfo;\n\n\ntypedef struct tagVideoEncParams\n{\n    //Int       Width;                  /* Input Width */\n    //Int       Height;                 /* Input Height */\n    //float FrameRate;              /* Input Frame Rate */\n    UInt    TimeIncrementRes;       /* timeIncrementRes */\n\n    /*VOL Parameters */\n    Int     nLayers;\n    Int     LayerWidth[4];          /* Encoded Width */\n    Int     LayerHeight[4];         /* Encoded Height */\n    float   LayerFrameRate[4];      /* Encoded Frame Rate */\n    Int     LayerBitRate[4];        /* Encoded BitRate */\n    Int     LayerMaxBitRate[4];     /* Maximum Encoded BitRate */\n    float   LayerMaxFrameRate[4];   /* Maximum Encoded Frame Rate */\n    Int     LayerMaxMbsPerSec[4];   /* Maximum mbs per second, according to the specified profile and level */\n    Int   
  LayerMaxBufferSize[4];  /* Maximum buffer size, according to the specified profile and level */\n\n    Bool    ResyncMarkerDisable;    /* Disable Resync Marker */\n    Bool    DataPartitioning;       /* Base Layer Data Partitioning */\n    Bool    ReversibleVLC;          /* RVLC when Data Partitioning */\n    Bool    ACDCPrediction;         /* AC/DC Prediction    */\n    Int     QuantType[4];           /* H263, MPEG2 */\n    Int     InitQuantBvop[4];\n    Int     InitQuantPvop[4];\n    Int     InitQuantIvop[4];\n    Int     ResyncPacketsize;\n\n    Int     RoundingType;\n    Int     IntraDCVlcThr;\n\n    /* Rate Control Parameters */\n    MP4RateControlType  RC_Type;        /*Constant Q, M4 constantRate, VM5+, M4RC,MPEG2TM5 */\n\n    /* Intra Refresh Parameters */\n    Int     IntraPeriod;            /* Intra update period */\n    Int     Refresh;                /* Number of MBs refresh in each frame */\n    /* Other Parameters */\n    Bool    SceneChange_Det;        /* scene change detection */\n    Bool    FineFrameSkip_Enabled;  /* src rate resolution frame skipping */\n    Bool    VBR_Enabled;            /* VBR rate control */\n    Bool    NoFrameSkip_Enabled;    /* do not allow frame skip */\n    Bool    NoPreSkip_Enabled;      /* do not allow pre-skip */\n\n    Bool    H263_Enabled;           /* H263 Short Header */\n    Bool    GOV_Enabled;            /* GOV Header Enabled */\n    Bool    SequenceStartCode;      /* This probably should be removed */\n    Bool    FullSearch_Enabled;     /* full-pel exhaustive search motion estimation */\n    Bool    HalfPel_Enabled;        /* Turn Halfpel ME on or off */\n    Bool    MV8x8_Enabled;          /* Enable 8x8 motion vectors */\n    Bool    RD_opt_Enabled;         /* Enable operational R-D optimization */\n    Int     GOB_Header_Interval;        /* Enable encoding GOB header in H263_WITH_ERR_RES and SHORT_HERDER_WITH_ERR_RES */\n    Int     SearchRange;            /* Search range for 16x16 motion vector */\n    
Int     MemoryUsage;            /* Amount of memory allocated */\n    Int     GetVolHeader[2];        /* Flag to check if Vol Header has been retrieved */\n    Int     BufferSize[2];          /* Buffer Size for Base and Enhance Layers */\n    Int     ProfileLevel[2];        /* Profile and Level for encoding purposes */\n    float   VBV_delay;              /* VBV buffer size in the form of delay */\n    Int     maxFrameSize;           /* maximum frame size(bits) for H263/Short header mode, k*16384 */\n    Int     profile_table_index;    /* index for profile and level tables given the specified profile and level */\n\n} VideoEncParams;\n\n/* platform dependent functions */\ntypedef struct tagFuncPtr\n{\n//  Int (*SAD_MB_HalfPel)(UChar *ref,UChar *blk,Int dmin_lx,Int xh,Int yh,void *extra_info);\n    Int(*SAD_MB_HalfPel[4])(UChar*, UChar*, Int, void *);\n    Int(*SAD_Blk_HalfPel)(UChar *ref, UChar *blk, Int dmin, Int lx, Int rx, Int xh, Int yh, void *extra_info);\n    Int(*SAD_Macroblock)(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info);\n    Int(*SAD_Block)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info);\n    Int(*SAD_MB_PADDING)(UChar *ref, UChar *blk, Int dmin, Int lx, void *extra_info); /*, 4/21/01 */\n    void (*ComputeMBSum)(UChar *cur, Int lx, MOT *mot_mb);\n    void (*ChooseMode)(UChar *Mode, UChar *cur, Int lx, Int min_SAD);\n    void (*GetHalfPelMBRegion)(UChar *cand, UChar *hmem, Int lx);\n    void (*blockIdct)(Int *block);\n\n\n} FuncPtr;\n\n/* 04/09/01, for multipass rate control */\n\ntypedef struct tagRDInfo\n{\n    Int QP;\n    Int actual_bits;\n    float mad;\n    float R_D;\n} RDInfo;\n\ntypedef struct tagMultiPass\n{\n    /* multipass rate control data */\n    Int target_bits;    /* target bits for current frame, = rc->T */\n    Int actual_bits;    /* actual bits for current frame obtained after encoding, = rc->Rc*/\n    Int QP;             /* quantization level for current frame, = rc->Qc*/\n    Int prev_QP;        /* 
quantization level for previous frame */\n    Int prev_prev_QP;   /* quantization level for previous frame before last*/\n    float mad;          /* mad for current frame, = video->avgMAD*/\n    Int bitrate;        /* bitrate for current frame */\n    float framerate;    /* framerate for current frame*/\n\n    Int nRe_Quantized;  /* control variable for multipass encoding, */\n    /* 0 : first pass */\n    /* 1 : intermediate pass(quantization and VLC loop only) */\n    /* 2 : final pass(de-quantization, idct, etc) */\n    /* 3 : macroblock level rate control */\n\n    Int encoded_frames;     /* counter for all encoded frames */\n    Int re_encoded_frames;  /* counter for all multipass encoded frames*/\n    Int re_encoded_times;   /* counter for all times of multipass frame encoding */\n\n    /* Multiple frame prediction*/\n    RDInfo **pRDSamples;        /* pRDSamples[30][32], 30->30fps, 32 -> 5 bit quantizer, 32 candidates*/\n    Int framePos;               /* specific position in previous multiple frames*/\n    Int frameRange;             /* number of overall previous multiple frames */\n    Int samplesPerFrame[30];    /* number of samples per frame, 30->30fps */\n\n    /* Bit allocation for scene change frames and high motion frames */\n    float sum_mad;\n    Int counter_BTsrc;  /* BT = Bit Transfer, bit transfer from low motion frames or less complicatedly compressed frames */\n    Int counter_BTdst;  /* BT = Bit Transfer, bit transfer to scene change frames or high motion frames or more complicatedly compressed frames */\n    float sum_QP;\n    Int diff_counter;   /* diff_counter = -diff_counter_BTdst, or diff_counter_BTsrc */\n\n    /* For target bitrate or framerate update */\n    float target_bits_per_frame;        /* = C = bitrate/framerate */\n    float target_bits_per_frame_prev;   /* previous C */\n    float aver_mad;                     /* so-far average mad could replace sum_mad */\n    float aver_mad_prev;                /* previous average mad 
*/\n    Int   overlapped_win_size;          /* transition period of time */\n    Int   encoded_frames_prev;          /* previous encoded_frames */\n} MultiPass;\n\n/* End */\n\n#ifdef HTFM\ntypedef struct tagHTFM_Stat\n{\n    Int abs_dif_mad_avg;\n    UInt countbreak;\n    Int offsetArray[16];\n    Int offsetRef[16];\n} HTFM_Stat;\n#endif\n\n/* Global structure that can be passed around */\ntypedef struct tagVideoEncData\n{\n    /* VOL Header Initialization */\n    UChar   volInitialize[4];       /* Used to Write VOL Headers */\n    /* Data For Layers (Scalability) */\n    Int     numberOfLayers;     /* Number of Layers */\n    Vol     **vol;              /* Data stored for each VOL */\n\n    /* Data used for encoding frames */\n    VideoEncFrameIO *input;     /* original input frame */\n    Vop     *currVop;           /* Current reconstructed VOP */\n    Vop     *prevBaseVop;       /* Previous reference Base Vop */\n    Vop     *nextBaseVop;       /* Next reference Base Vop */\n    Vop     *prevEnhanceVop;/* Previous Enhancement Layer Vop */\n    Vop     *forwardRefVop;     /* Forward Reference VOP */\n    Vop     *backwardRefVop;    /* Backward Reference VOP */\n\n    /* scratch memory */\n    BitstreamEncVideo  *bitstream1; /* Used for data partitioning */\n    BitstreamEncVideo  *bitstream2; /* and combined modes as      */\n    BitstreamEncVideo  *bitstream3; /* intermediate storages      */\n\n    UChar   *overrunBuffer;  /* extra output buffer to prevent current skip due to output buffer overrun*/\n    Int     oBSize;     /* size of allocated overrun buffer */\n\n    Int dc_scalar_1;            /*dc scalar for Y block */\n    Int dc_scalar_2;            /*dc scalar for U, V block*/\n\n    /* Annex L Rate Control */\n    rateControl     *rc[4];         /* Pointer to Rate Control structure*/\n    /* 12/25/00, each R.C. 
for each layer */\n\n    /********* motion compensation related variables ****************/\n    MOT     **mot;              /* Motion vectors */\n    /*  where [mbnum][0] = 1MV.\n        [mbnum][1...4] = 4MVs\n        [mbnum][5] = backward MV.\n        [mbnum][6] = delta MV for direct mode.\n        [mbnum][7] = nothing yet. */\n    UChar   *intraArray;            /* Intra Update Arrary */\n    float   sumMAD;             /* SAD/MAD for frame */\n\n    /* to speedup the SAD calculation */\n    void *sad_extra_info;\n#ifdef HTFM\n    Int nrmlz_th[48];       /* Threshold for fast SAD calculation using HTFM */\n    HTFM_Stat htfm_stat;    /* For statistics collection */\n#endif\n\n    /*Tao 04/09/00  For DCT routine */\n    UChar currYMB[256];     /* interleaved current macroblock in HTFM order */\n    MacroBlock  *outputMB;          /* Output MB to VLC encode */\n    UChar   predictedMB[384];   /* scrath memory for predicted value */\n    RunLevelBlock RLB[6];       /* Run and Level of coefficients! 
*/\n    Short   dataBlock[128];     /* DCT block data before and after quant/dequant*/\n\n    UChar   bitmaprow[8];       /* Need to keep it for ACDCPrediction, 8 bytes for alignment, need only 6 */\n    UChar   bitmapcol[6][8];\n    UInt    bitmapzz[6][2]; /* for zigzag bitmap */\n    Int     zeroMV;         /* flag for zero MV */\n\n    Int     usePrevQP;      /* flag for intraDCVlcThreshold switch decision */\n    Int     QP_prev;            /* use for DQUANT calculation */\n    Int     *acPredFlag;        /* */\n    typeDCStore     *predDC;        /* The DC coeffs for each MB */\n    typeDCACStore   *predDCAC_row;\n    typeDCACStore   *predDCAC_col;\n\n\n    UChar   *sliceNo;           /* Slice Number for each MB */\n\n    Int     header_bits;        /* header bits in frmae */\n    HeaderInfoEncVideo  headerInfo; /* MB Header information */\n    UChar   zz_direction;       /* direction of zigzag scan */\n    UChar   *QPMB;              /* Quantizer value for each MB */\n\n    /* Miscellaneous data points to be passed */\n    float   FrameRate;          /* Src frame Rate */\n\n    ULong   nextModTime;        /* expected next frame time */\n    UInt    prevFrameNum[4];    /* previous frame number starting from modTimeRef */\n    UInt    modTimeRef;     /* Reference modTime update every I-Vop*/\n    UInt    refTick[4];         /* second aligned referenc tick */\n    Int     relLayerCodeTime[4];/* Next coding time for each Layer relative to highest layer */\n\n    ULong   modTime;            /* Input frame modTime */\n    Int     currLayer;          /* Current frame layer  */\n    Int     mbnum;              /*  Macroblock number */\n\n    /* slice coding, state variables */\n    Vop     *tempForwRefVop;\n    Int     tempRefSelCode;\n    Int     end_of_buf;         /* end of bitstream buffer flag */\n    Int     slice_coding;       /* flag for slice based coding */\n    Int     totalSAD;           /* So far total SAD for a frame */\n    Int     numIntra;           
/* So far number of Intra MB */\n    Int     offset;             /* So far MB offset */\n    Int     ind_x, ind_y;       /* So far MB coordinate */\n    Int     collect;\n    Int     hp_guess;\n    /*********************************/\n\n    HintTrackInfo hintTrackInfo;    /* hintTrackInfo */\n    /* IntraPeriod, Timestamp, etc. */\n    float       nextEncIVop;    /* counter til the next I-Vop */\n    float       numVopsInGOP;   /* value at the beginning of nextEncIVop */\n\n    /* platform dependent functions */\n    FuncPtr     *functionPointer;   /* structure containing platform dependent functions */\n\n    /* Application controls */\n    VideoEncControls    *videoEncControls;\n    VideoEncParams      *encParams;\n\n    MultiPass *pMP[4]; /* for multipass encoding, 4 represents 4 layer encoding */\n\n} VideoEncData;\n\n/*************************************************************/\n/*                  VLC structures                           */\n/*************************************************************/\n\ntypedef struct tagVLCtable\n{\n    unsigned int code; /* right justified */\n    int len;\n} VLCtable, *LPVLCtable;\n\n\n/*************************************************************/\n/*                  Approx DCT                               */\n/*************************************************************/\ntypedef struct struct_approxDCT  approxDCT;\nstruct struct_approxDCT\n{\n    Void(*BlockDCT8x8)(Int *, Int *, UChar *, UChar *, Int, Int);\n    Void(*BlockDCT8x8Intra)(Int *, Int *, UChar *, UChar *, Int, Int);\n    Void(*BlockDCT8x8wSub)(Int *, Int *, UChar *, UChar *, Int, Int);\n};\n\n/*************************************************************/\n/*                  QP structure                             */\n/*************************************************************/\n\nstruct QPstruct\n{\n    Int QPx2 ;\n    Int QP;\n    Int QPdiv2;\n    Int QPx2plus;\n    Int Addition;\n};\n\n\n#endif /* _MP4LIB_INT_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/pvm4vencoder.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2010 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"pvm4vencoder.h\"\n#include \"oscl_mem.h\"\n\n#include \"oscl_dll.h\"\nOSCL_DLL_ENTRY_POINT_DEFAULT()\n\n/* ///////////////////////////////////////////////////////////////////////// */\nCPVM4VEncoder::CPVM4VEncoder()\n{\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)\n    ccRGBtoYUV = NULL;\n#endif\n    //iEncoderControl\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF CPVM4VEncoder::~CPVM4VEncoder()\n{\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)\n    OSCL_DELETE(ccRGBtoYUV);\n#endif\n\n    Cancel(); /* CTimer function */\n    Terminate();\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF CPVM4VEncoder* CPVM4VEncoder::New(int32 aThreadId)\n{\n    CPVM4VEncoder* self = new CPVM4VEncoder;\n    if (self && self->Construct(aThreadId))\n        return self;\n    if (self)\n        OSCL_DELETE(self);\n    return NULL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nbool CPVM4VEncoder::Construct(int32 aThreadId)\n{\n    oscl_memset((void *)&iEncoderControl, 0, 
sizeof(VideoEncControls));\n    iInitialized = false;\n    iObserver = NULL;\n    iNumOutputData = 0;\n    iYUVIn = NULL;\n    for (int i = 0; i < KCVEIMaxOutputBuffer; i++)\n    {\n        iOutputData[i] = NULL;\n    }\n    iState = EIdle;\n\n    if (aThreadId >= 0)\n        AddToScheduler();\n\n    return true;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nvoid CPVM4VEncoder::DoCancel()\n{\n    /* called when Cancel() is called.*/\n    // They use Stop for PVEngine.cpp in PVPlayer.\n    return ;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::SetObserver(MPVCVEIObserver *aObserver)\n{\n    iObserver = aObserver;\n    return ECVEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::AddBuffer(TPVVideoOutputData *aVidOut)\n{\n    if (iNumOutputData >= KCVEIMaxOutputBuffer)\n    {\n        return ECVEI_FAIL;\n    }\n\n    iOutputData[iNumOutputData++] = aVidOut;\n\n    return ECVEI_SUCCESS;\n}\n\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::Encode(TPVVideoInputData *aVidIn)\n{\n    ULong modTime;\n    VideoEncFrameIO vid_in;\n\n    if (iState != EIdle || iObserver == NULL)\n    {\n        return ECVEI_FAIL;\n    }\n\n    if (aVidIn->iTimeStamp >= iNextModTime)\n    {\n        if (iVideoFormat == ECVEI_YUV420)\n#ifdef YUV_INPUT\n        {\n            if (iYUVIn) /* iSrcWidth is not multiple of 4 or iSrcHeight is odd number */\n            {\n                CopyToYUVIn(aVidIn->iSource, iSrcWidth, iSrcHeight,\n                ((iSrcWidth + 15) >> 4) << 4, ((iSrcHeight + 15) >> 4) << 4);\n                iVideoIn = iYUVIn;\n            }\n            else /* otherwise, we can just use aVidIn->iSource */\n            {\n                iVideoIn = aVidIn->iSource;\n            }\n   
     }\n#else\n            return ECVEI_FAIL;\n#endif\n\n        if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR))\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)\n        {\n            ccRGBtoYUV->Convert(aVidIn->iSource, iYUVIn);\n            iVideoIn = iYUVIn;\n        }\n#else\n            return ECVEI_FAIL;\n#endif\n\n        /* assign with backward-P or B-Vop this timestamp must be re-ordered */\n        iTimeStamp = aVidIn->iTimeStamp;\n\n        modTime = iTimeStamp;\n\n#ifdef NO_SLICE_ENCODE\n        return ECVEI_FAIL;\n#else\n        vid_in.height = ((iSrcHeight + 15) >> 4) << 4;\n        vid_in.pitch = ((iSrcWidth + 15) >> 4) << 4;\n        vid_in.timestamp = modTime;\n        vid_in.yChan = (UChar*)iVideoIn;\n        vid_in.uChan = (UChar*)(iVideoIn + vid_in.height * vid_in.pitch);\n        vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >> 2);\n\n        /*status = */\n        (int) PVEncodeFrameSet(&iEncoderControl, &vid_in, &modTime, &iNumLayer);\n#endif\n\n        iState = EEncode;\n        RunIfNotReady();\n\n        return ECVEI_SUCCESS;\n    }\n    else /* if(aVidIn->iTimeStamp >= iNextModTime) */\n    {\n        iTimeStamp = aVidIn->iTimeStamp;\n        iNumLayer = -1;\n        iState = EEncode;\n        RunIfNotReady();\n        return ECVEI_SUCCESS;\n    }\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nvoid CPVM4VEncoder::Run()\n{\n#ifndef NO_SLICE_ENCODE\n\n    //Bool status;\n    Int Size, endOfFrame = 0;\n    ULong modTime;\n    int32 oindx;\n    VideoEncFrameIO vid_out;\n\n    switch (iState)\n    {\n        case EEncode:\n            /* find available bitstream */\n            if (iNumOutputData <= 0)\n            {\n                iObserver->HandlePVCVEIEvent(iId, ECVEI_NO_BUFFERS);\n                RunIfNotReady(50);\n                break;\n            }\n            oindx   =   
--iNumOutputData;                       /* last-in first-out */\n            Size    =   iOutputData[oindx]->iBitStreamSize;\n            iOutputData[oindx]->iExternalTimeStamp  = iTimeStamp;\n            iOutputData[oindx]->iVideoTimeStamp     = iTimeStamp;\n            iOutputData[oindx]->iFrame              = iVideoOut;\n\n            if (iNumLayer == -1)\n            {\n                iOutputData[oindx]->iBitStreamSize = 0;\n                iOutputData[oindx]->iLayerNumber = iNumLayer;\n                iState = EIdle;\n                iObserver->HandlePVCVEIEvent(iId, ECVEI_FRAME_DONE, (uint32)iOutputData[oindx]);\n                break;\n            }\n\n            /*status = */\n            (int) PVEncodeSlice(&iEncoderControl, (UChar*)iOutputData[oindx]->iBitStream, &Size,\n                                &endOfFrame, &vid_out, &modTime);\n\n            iOutputData[oindx]->iBitStreamSize = Size;\n            iOutputData[oindx]->iLayerNumber = iNumLayer;\n            if (endOfFrame != 0) /* done with this frame */\n            {\n                iNextModTime = modTime;\n                iOutputData[oindx]->iFrame = iVideoOut = (uint8*)vid_out.yChan;\n                iOutputData[oindx]->iVideoTimeStamp = vid_out.timestamp;\n\n                if (endOfFrame == -1) /* pre-skip */\n                {\n                    iOutputData[oindx]->iLayerNumber = -1;\n                }\n                else\n                {\n                    PVGetHintTrack(&iEncoderControl, &(iOutputData[oindx]->iHintTrack));\n                }\n                iState = EIdle;\n                iObserver->HandlePVCVEIEvent(iId, ECVEI_FRAME_DONE, (uint32)iOutputData[oindx]);\n            }\n            else\n            {\n                RunIfNotReady();\n                iObserver->HandlePVCVEIEvent(iId, ECVEI_BUFFER_READY, (uint32)iOutputData[oindx]);\n            }\n\n            break;\n        default:\n            break;\n    }\n#endif /* NO_SLICE_ENCODE */\n\n    return 
;\n}\n\nTCVEI_RETVAL CPVM4VEncoder::ParseFSI(uint8* aFSIBuff, int FSILength, VideoEncOptions *aEncOption)\n{\n    uint32 codeword;\n    mp4StreamType *psBits;\n    psBits = (mp4StreamType *) oscl_malloc(sizeof(mp4StreamType));\n    if (psBits == NULL)\n        return ECVEI_FAIL;\n    psBits->data = aFSIBuff;\n    psBits->numBytes = FSILength;\n    psBits->bitBuf = 0;\n    psBits->bitPos = 32;\n    psBits->bytePos = 0;\n    psBits->dataBitPos = 0;\n\n    //visual_object_sequence_start_code\n    ShowBits(psBits, 32, &codeword);\n\n    if (codeword == VISUAL_OBJECT_SEQUENCE_START_CODE)\n    {\n        psBits->dataBitPos += 32;\n        psBits->bitPos += 32;\n\n        ReadBits(psBits, 8, &codeword); /* profile_and_level_indication */\n\n        switch (codeword)\n        {\n            case 0x08: /* SP_LVL0 */\n            {\n                aEncOption->profile_level =  SIMPLE_PROFILE_LEVEL0;\n                break;\n            }\n            case 0x01: /* SP_LVL1 */\n            {\n                aEncOption->profile_level =  SIMPLE_PROFILE_LEVEL1;\n                break;\n            }\n            case 0x02: /* SP_LVL2 */\n            {\n                aEncOption->profile_level =  SIMPLE_PROFILE_LEVEL2;\n                break;\n            }\n            case 0x03: /* SP_LVL3 */\n            {\n                aEncOption->profile_level =  SIMPLE_PROFILE_LEVEL3;\n                break;\n            }\n            case 0x21: /* CP_LVL1 */\n            {\n                aEncOption->profile_level =  CORE_PROFILE_LEVEL1;\n                break;\n            }\n            case 0x22: /* CP_LVL2 */\n            {\n                aEncOption->profile_level =  CORE_PROFILE_LEVEL2;\n                break;\n            }\n            default:\n            {\n                goto FREE_PS_BITS_AND_FAIL;\n            }\n        }\n\n        ShowBits(psBits, 32, &codeword);\n        if (codeword == USER_DATA_START_CODE)\n        {\n            goto FREE_PS_BITS_AND_FAIL;\n     
   }\n\n        //visual_object_start_code\n        ReadBits(psBits, 32, &codeword);\n        if (codeword != VISUAL_OBJECT_START_CODE) goto FREE_PS_BITS_AND_FAIL;\n\n        /*  is_visual_object_identifier            */\n        ReadBits(psBits, 1, &codeword);\n        if (codeword) goto FREE_PS_BITS_AND_FAIL;\n\n        /* visual_object_type                                 */\n        ReadBits(psBits, 4, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        /* video_signal_type */\n        ReadBits(psBits, 1, &codeword);\n        if (codeword) goto FREE_PS_BITS_AND_FAIL;\n\n        /* next_start_code() */\n        ByteAlign(psBits);\n\n        ShowBits(psBits, 32, &codeword);\n        if (codeword == USER_DATA_START_CODE)\n        {\n            goto FREE_PS_BITS_AND_FAIL;\n        }\n    }\n\n    ShowBits(psBits, 27, &codeword);\n\n    if (codeword == VO_START_CODE)\n    {\n\n        ReadBits(psBits, 32, &codeword);\n\n        /* video_object_layer_start_code                   */\n        ShowBits(psBits, 28, &codeword);\n        if (codeword != VOL_START_CODE)\n        {\n            ShowBits(psBits, 22, &codeword);\n            if (codeword == SHORT_VIDEO_START_MARKER)\n            {\n                iDecodeShortHeader(psBits, aEncOption);\n                return ECVEI_SUCCESS;\n            }\n            else\n            {\n                goto FREE_PS_BITS_AND_FAIL;\n\n            }\n        }\n        /* video_object_layer_start_code                   */\n        ReadBits(psBits, 28, &codeword);\n\n        /* vol_id (4 bits) */\n        ReadBits(psBits, 4, & codeword);\n\n        // RandomAccessibleVOLFlag\n        ReadBits(psBits, 1, &codeword);\n\n        //Video Object Type Indication\n        ReadBits(psBits, 8, &codeword);\n        if (codeword > 2)\n        {\n            goto FREE_PS_BITS_AND_FAIL;\n        }\n\n        // is_object_layer_identifier\n        ReadBits(psBits, 1, &codeword);\n        if (codeword) goto 
FREE_PS_BITS_AND_FAIL;\n\n        // aspect ratio\n        ReadBits(psBits, 4, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        //vol_control_parameters\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        //      video_object_layer_shape\n        ReadBits(psBits, 2, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        //Marker bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        //  vop_time_increment_resolution\n        ReadBits(psBits, 16, &codeword);\n        aEncOption->timeIncRes = codeword;\n\n        //Marker bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1)\n            goto FREE_PS_BITS_AND_FAIL;\n\n        //      fixed_vop_rate\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        /* video_object_layer_shape is RECTANGULAR */\n        //Marker bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        /* this should be 176 for QCIF */\n        ReadBits(psBits, 13, &codeword);\n        aEncOption->encWidth[0] = codeword;\n\n        //Marker bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        /* this should be 144 for QCIF */\n        ReadBits(psBits, 13, &codeword);\n        aEncOption->encHeight[0] = codeword;\n\n        //Marker bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        //Interlaced\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        //obmc_disable\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 1) goto FREE_PS_BITS_AND_FAIL;\n\n        //sprite_enable\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        
//not_8_bit\n        ReadBits(psBits, 1, &codeword);\n        if (codeword != 0) goto FREE_PS_BITS_AND_FAIL;\n\n        /* video_object_layer_shape is not GRAY_SCALE  */\n        //quant_type\n        ReadBits(psBits, 1, &codeword);\n        aEncOption->quantType[0] = codeword;\n        if (codeword != 0) //quant_type = 1\n        {\n            ReadBits(psBits, 1, &codeword); //load_intra_quant_mat\n            if (codeword) goto FREE_PS_BITS_AND_FAIL; // No support for user defined matrix.\n\n            ReadBits(psBits, 1, &codeword); //load_nonintra_quant_mat\n            if (codeword) goto FREE_PS_BITS_AND_FAIL; // No support for user defined matrix.\n\n        }\n\n        //complexity_estimation_disable\n        ReadBits(psBits, 1, &codeword);\n        if (!codeword)\n        {\n            goto FREE_PS_BITS_AND_FAIL;\n        }\n\n        //resync_marker_disable\n        ReadBits(psBits, 1, &codeword);\n        if (codeword)\n        {\n            aEncOption->packetSize = 0;\n        }\n\n        //data_partitioned\n        ReadBits(psBits, 1, &codeword);\n        if (codeword)\n        {\n            aEncOption->encMode = DATA_PARTITIONING_MODE;\n            //reversible_vlc\n            ReadBits(psBits, 1, &codeword);\n            aEncOption->rvlcEnable = (ParamEncMode) codeword;\n\n        }\n        else\n        {\n            // No data_partitioned\n            if (aEncOption->packetSize > 0)\n            {\n                aEncOption->encMode = COMBINE_MODE_WITH_ERR_RES;\n            }\n            else\n            {\n                aEncOption->encMode = COMBINE_MODE_NO_ERR_RES;\n            }\n        }\n\n        //scalability\n        ReadBits(psBits, 1, &codeword);\n        if (codeword) goto FREE_PS_BITS_AND_FAIL;\n\n    }\n    else\n    {\n        /* SHORT_HEADER */\n        ShowBits(psBits, SHORT_VIDEO_START_MARKER_LENGTH, &codeword);\n        if (codeword == SHORT_VIDEO_START_MARKER)\n        {\n            iDecodeShortHeader(psBits, 
aEncOption);\n        }\n        else\n        {\n            goto FREE_PS_BITS_AND_FAIL;\n\n        }\n    }\n    return ECVEI_SUCCESS;\n\nFREE_PS_BITS_AND_FAIL:\n\n    oscl_free(psBits);\n\n    return ECVEI_FAIL;\n}\n\nint16 CPVM4VEncoder::iDecodeShortHeader(mp4StreamType *psBits, VideoEncOptions *aEncOption)\n{\n    uint32 codeword;\n    int *width, *height;\n\n    //Default values\n    aEncOption->quantType[0] =  0;\n    aEncOption->rvlcEnable = PV_OFF;\n    aEncOption->packetSize = 0;     // Since, by default resync_marker_disable = 1;\n    aEncOption->encMode = SHORT_HEADER;     // NO error resilience\n    width = &(aEncOption->encWidth[0]);\n    height = &(aEncOption->encHeight[0]);\n\n    //short_video_start_marker\n    ShowBits(psBits, 22, &codeword);\n    if (codeword !=  0x20)\n    {\n        return ECVEI_FAIL;\n    }\n    FlushBits(psBits, 22);\n\n    //temporal_reference\n    ReadBits(psBits, 8, &codeword);\n\n    //marker_bit\n    ReadBits(psBits, 1, &codeword);\n    if (codeword == 0) return ECVEI_FAIL;\n\n    //zero_bit\n    ReadBits(psBits, 1, &codeword);\n    if (codeword == 1) return ECVEI_FAIL;\n\n    //split_screen_indicator\n    ReadBits(psBits, 1, &codeword);\n    if (codeword == 1) return ECVEI_FAIL;\n\n    //document_camera_indicator\n    ReadBits(psBits, 1, &codeword);\n    if (codeword == 1) return ECVEI_FAIL;\n\n    //full_picture_freeze_release\n    ReadBits(psBits, 1, &codeword);\n    if (codeword == 1) return ECVEI_FAIL;\n\n    /* source format */\n    ReadBits(psBits, 3, &codeword);\n    switch (codeword)\n    {\n        case 1:\n            *width = 128;\n            *height = 96;\n            break;\n\n        case 2:\n            *width = 176;\n            *height = 144;\n            break;\n\n        case 3:\n            *width = 352;\n            *height = 288;\n            break;\n\n        case 4:\n            *width = 704;\n            *height = 576;\n            break;\n\n        case 5:\n            *width = 1408;\n         
   *height = 1152;\n            break;\n\n        default:\n            return ECVEI_FAIL;\n    }\n\n    return 0;\n}\n\nint16 CPVM4VEncoder::ShowBits(\n    mp4StreamType *pStream,           /* Input Stream */\n    uint8 ucNBits,          /* nr of bits to read */\n    uint32 *pulOutData      /* output target */\n)\n{\n    uint8 *bits;\n    uint32 dataBitPos = pStream->dataBitPos;\n    uint32 bitPos = pStream->bitPos;\n    uint32 dataBytePos;\n\n    uint i;\n\n    if (ucNBits > (32 - bitPos))    /* not enough bits */\n    {\n        dataBytePos = dataBitPos >> 3; /* Byte Aligned Position */\n        bitPos = dataBitPos & 7; /* update bit position */\n        if (dataBytePos > pStream->numBytes - 4)\n        {\n            pStream->bitBuf = 0;\n            for (i = 0; i < pStream->numBytes - dataBytePos; i++)\n            {\n                pStream->bitBuf |= pStream->data[dataBytePos+i];\n                pStream->bitBuf <<= 8;\n            }\n            pStream->bitBuf <<= 8 * (3 - i);\n        }\n        else\n        {\n            bits = &pStream->data[dataBytePos];\n            pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3];\n        }\n        pStream->bitPos = bitPos;\n    }\n\n    bitPos += ucNBits;\n\n    *pulOutData = (pStream->bitBuf >> (32 - bitPos)) & MASK[(uint16)ucNBits];\n\n\n    return 0;\n}\n\nint16 CPVM4VEncoder::FlushBits(\n    mp4StreamType *pStream,           /* Input Stream */\n    uint8 ucNBits                      /* number of bits to flush */\n)\n{\n    uint8 *bits;\n    uint32 dataBitPos = pStream->dataBitPos;\n    uint32 bitPos = pStream->bitPos;\n    uint32 dataBytePos;\n\n\n    if ((dataBitPos + ucNBits) > (uint32)(pStream->numBytes << 3))\n        return (-2); // Buffer over run\n\n    dataBitPos += ucNBits;\n    bitPos     += ucNBits;\n\n    if (bitPos > 32)\n    {\n        dataBytePos = dataBitPos >> 3;    /* Byte Aligned Position */\n        bitPos = dataBitPos & 7; /* update bit position */\n        
bits = &pStream->data[dataBytePos];\n        pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3];\n    }\n\n    pStream->dataBitPos = dataBitPos;\n    pStream->bitPos     = bitPos;\n\n    return 0;\n}\n\nint16 CPVM4VEncoder::ReadBits(\n    mp4StreamType *pStream,           /* Input Stream */\n    uint8 ucNBits,                     /* nr of bits to read */\n    uint32 *pulOutData                 /* output target */\n)\n{\n    uint8 *bits;\n    uint32 dataBitPos = pStream->dataBitPos;\n    uint32 bitPos = pStream->bitPos;\n    uint32 dataBytePos;\n\n\n    if ((dataBitPos + ucNBits) > (pStream->numBytes << 3))\n    {\n        *pulOutData = 0;\n        return (-2); // Buffer over run\n    }\n\n    //  dataBitPos += ucNBits;\n\n    if (ucNBits > (32 - bitPos))    /* not enough bits */\n    {\n        dataBytePos = dataBitPos >> 3;    /* Byte Aligned Position */\n        bitPos = dataBitPos & 7; /* update bit position */\n        bits = &pStream->data[dataBytePos];\n        pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3];\n    }\n\n    pStream->dataBitPos += ucNBits;\n    pStream->bitPos      = (unsigned char)(bitPos + ucNBits);\n\n    *pulOutData = (pStream->bitBuf >> (32 - pStream->bitPos)) & MASK[(uint16)ucNBits];\n\n    return 0;\n}\n\n\n\nint16 CPVM4VEncoder::ByteAlign(\n    mp4StreamType *pStream           /* Input Stream */\n)\n{\n    uint8 *bits;\n    uint32 dataBitPos = pStream->dataBitPos;\n    uint32 bitPos = pStream->bitPos;\n    uint32 dataBytePos;\n    uint32 leftBits;\n\n    leftBits =  8 - (dataBitPos & 0x7);\n    if (leftBits == 8)\n    {\n        if ((dataBitPos + 8) > (uint32)(pStream->numBytes << 3))\n            return (-2); // Buffer over run\n        dataBitPos += 8;\n        bitPos += 8;\n    }\n    else\n    {\n        dataBytePos = dataBitPos >> 3;\n        dataBitPos += leftBits;\n        bitPos += leftBits;\n    }\n\n\n    if (bitPos > 32)\n    {\n        dataBytePos = dataBitPos >> 3;  
  /* Byte Aligned Position */\n        bits = &pStream->data[dataBytePos];\n        pStream->bitBuf = (bits[0] << 24) | (bits[1] << 16) | (bits[2] << 8) | bits[3];\n    }\n\n    pStream->dataBitPos = dataBitPos;\n    pStream->bitPos     = bitPos;\n\n    return 0;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::Initialize(TPVVideoInputFormat *aVidInFormat, TPVVideoEncodeParam *aEncParam)\n{\n\n    int i;\n    TCVEI_RETVAL status;\n    MP4EncodingMode ENC_Mode ;\n    ParamEncMode RvlcMode = PV_OFF; /* default no RVLC */\n    Int quantType[2] = {0, 0};      /* default H.263 quant*/\n    VideoEncOptions aEncOption; /* encoding options */\n\n    iState = EIdle ; // stop encoding\n    iId = aEncParam->iEncodeID;\n\n    iOverrunBuffer = NULL;\n    iOBSize = 0;\n\n    if (aEncParam->iContentType ==  ECVEI_STREAMING)\n    {\n        ENC_Mode = DATA_PARTITIONING_MODE;\n    }\n    else if (aEncParam->iContentType == ECVEI_DOWNLOAD)\n    {\n        if (aEncParam->iPacketSize > 0)\n        {\n            ENC_Mode = COMBINE_MODE_WITH_ERR_RES;\n        }\n        else\n        {\n            ENC_Mode = COMBINE_MODE_NO_ERR_RES;\n        }\n    }\n    else if (aEncParam->iContentType == ECVEI_H263)\n    {\n        if (aEncParam->iPacketSize > 0)\n        {\n            ENC_Mode = H263_MODE_WITH_ERR_RES;\n        }\n        else\n        {\n            ENC_Mode = H263_MODE;\n        }\n    }\n    else\n    {\n        return ECVEI_FAIL;\n    }\n\n    iSrcWidth = aVidInFormat->iFrameWidth;\n    iSrcHeight = aVidInFormat->iFrameHeight;\n    iSrcFrameRate = (int) aVidInFormat->iFrameRate;\n    iVideoFormat = (TPVVideoFormat) aVidInFormat->iVideoFormat;\n    iFrameOrientation = aVidInFormat->iFrameOrientation;\n\n    if (iInitialized == true)  /* clean up before re-initialized */\n    {\n        /*status = */\n        (int) PVCleanUpVideoEncoder(&iEncoderControl);\n        if (iYUVIn)\n        {\n          
  oscl_free(iYUVIn);\n            iYUVIn = NULL;\n        }\n    }\n\n    // allocate iYUVIn\n    if (((iSrcWidth&0xF) || (iSrcHeight&0xF)) || iVideoFormat != ECVEI_YUV420) /* Not multiple of 16 */\n    {\n        iYUVIn = (uint8*) oscl_malloc(((((iSrcWidth + 15) >> 4) * ((iSrcHeight + 15) >> 4)) * 3) << 7);\n        if (iYUVIn == NULL)\n        {\n            return ECVEI_FAIL;\n        }\n    }\n\n    // check the buffer delay according to the clip duration\n    if (aEncParam->iClipDuration > 0 && aEncParam->iRateControlType == EVBR_1)\n    {\n        if (aEncParam->iBufferDelay > aEncParam->iClipDuration / 10000.0)   //enforce 10% variation of the clip duration as the bound of buffer delay\n        {\n            aEncParam->iBufferDelay = aEncParam->iClipDuration / (float)10000.0;\n        }\n    }\n\n    if (iVideoFormat == ECVEI_RGB24)\n    {\n#ifdef RGB24_INPUT\n        ccRGBtoYUV = CCRGB24toYUV420::New();\n#else\n        return ECVEI_FAIL;\n#endif\n    }\n    if (iVideoFormat == ECVEI_RGB12)\n    {\n#ifdef RGB12_INPUT\n        ccRGBtoYUV = CCRGB12toYUV420::New();\n#else\n        return ECVEI_FAIL;\n#endif\n    }\n    if (iVideoFormat == ECVEI_YUV420SEMIPLANAR)\n    {\n#ifdef YUV420SEMIPLANAR_INPUT\n        ccRGBtoYUV = CCYUV420SEMItoYUV420::New();\n#else\n        return ECVEI_FAIL;\n#endif\n    }\n\n    if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR))\n    {\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined (YUV420SEMIPLANAR_INPUT)\n        ccRGBtoYUV->Init(iSrcWidth, iSrcHeight, iSrcWidth, iSrcWidth, iSrcHeight, ((iSrcWidth + 15) >> 4) << 4, (iFrameOrientation == 1 ? 
CCBOTTOM_UP : 0));\n#endif\n    }\n\n    PVGetDefaultEncOption(&aEncOption, 0);\n\n\n    /* iContentType is M4v && FSI Buffer is input parameter */\n    if ((aEncParam->iContentType != ECVEI_H263) && (aEncParam->iFSIBuffLength))\n    {\n        aEncOption.encMode = ENC_Mode;\n        aEncOption.packetSize = aEncParam->iPacketSize;\n        aEncOption.numLayers = aEncParam->iNumLayer;\n\n        status = ParseFSI(aEncParam->iFSIBuff, aEncParam->iFSIBuffLength, &aEncOption);\n        if (ECVEI_FAIL == status)\n        {\n            return ECVEI_FAIL;\n        }\n\n        aEncOption.tickPerSrc = (int)(aEncOption.timeIncRes / aVidInFormat->iFrameRate + 0.5);\n\n        for (i = 0; i < aEncParam->iNumLayer; i++)\n        {\n            aEncOption.encFrameRate[i] = iEncFrameRate[i] = aEncParam->iFrameRate[i];\n            aEncOption.bitRate[i] = aEncParam->iBitRate[i];\n            aEncOption.iQuant[i] = aEncParam->iIquant[i];\n            aEncOption.pQuant[i] = aEncParam->iPquant[i];\n        }\n\n        if (aEncParam->iRateControlType == ECONSTANT_Q)\n            aEncOption.rcType = CONSTANT_Q;\n        else if (aEncParam->iRateControlType == ECBR_1)\n            aEncOption.rcType = CBR_1;\n        else if (aEncParam->iRateControlType == EVBR_1)\n            aEncOption.rcType = VBR_1;\n        else\n            return ECVEI_FAIL;\n\n    }\n    else    // All default Settings\n    {\n        aEncOption.encMode = ENC_Mode;\n        aEncOption.packetSize = aEncParam->iPacketSize;\n\n        Int profile_level = (Int)ECVEI_CORE_LEVEL2;\n        if (aEncParam->iNumLayer > 1) profile_level = (Int)ECVEI_CORE_SCALABLE_LEVEL3;\n        aEncOption.profile_level = (ProfileLevelType)profile_level;\n\n        aEncOption.rvlcEnable = RvlcMode;\n        aEncOption.numLayers = aEncParam->iNumLayer;\n        aEncOption.timeIncRes = 1000;\n        aEncOption.tickPerSrc = (int)(1000 / aVidInFormat->iFrameRate + 0.5);\n\n        for (i = 0; i < aEncParam->iNumLayer; i++)\n        {\n    
        aEncOption.encWidth[i] = iEncWidth[i] = aEncParam->iFrameWidth[i];\n            aEncOption.encHeight[i] = iEncHeight[i] = aEncParam->iFrameHeight[i];\n            aEncOption.encFrameRate[i] = iEncFrameRate[i] = aEncParam->iFrameRate[i];\n            aEncOption.bitRate[i] = aEncParam->iBitRate[i];\n            aEncOption.iQuant[i] = aEncParam->iIquant[i];\n            aEncOption.pQuant[i] = aEncParam->iPquant[i];\n            aEncOption.quantType[i] = quantType[i]; /* default to H.263 */\n        }\n\n        if (aEncParam->iRateControlType == ECONSTANT_Q)\n            aEncOption.rcType = CONSTANT_Q;\n        else if (aEncParam->iRateControlType == ECBR_1)\n            aEncOption.rcType = CBR_1;\n        else if (aEncParam->iRateControlType == EVBR_1)\n            aEncOption.rcType = VBR_1;\n        else\n            return ECVEI_FAIL;\n\n        // Check the bitrate, framerate, image size and buffer delay for 3GGP compliance\n#ifdef FOR_3GPP_COMPLIANCE\n        Check3GPPCompliance(aEncParam, iEncWidth, iEncHeight);\n#endif\n    }\n\n\n    aEncOption.vbvDelay = (float)aEncParam->iBufferDelay;\n    switch (aEncParam->iIFrameInterval)\n    {\n        case -1:\n            aEncOption.intraPeriod = -1;\n            break;\n        case 0:\n            aEncOption.intraPeriod = 0;\n            break;\n        default:\n            aEncOption.intraPeriod = (int)(aEncParam->iIFrameInterval *  aVidInFormat->iFrameRate);\n            break;\n    }\n    aEncOption.numIntraMB = aEncParam->iNumIntraMBRefresh;\n    aEncOption.sceneDetect = (aEncParam->iSceneDetection == true) ? PV_ON : PV_OFF;\n    aEncOption.noFrameSkipped = (aEncParam->iNoFrameSkip == true) ? PV_ON : PV_OFF;\n    aEncOption.searchRange = aEncParam->iSearchRange;\n    aEncOption.mv8x8Enable = (aEncParam->iMV8x8 == true) ? 
PV_ON : PV_OFF;\n\n    if (PV_FALSE == PVInitVideoEncoder(&iEncoderControl, &aEncOption))\n    {\n        goto FAIL;\n    }\n\n    iNextModTime = 0;\n    iInitialized = true;\n    return ECVEI_SUCCESS;\n\nFAIL:\n    iInitialized = false;\n    return ECVEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF int32 CPVM4VEncoder::GetBufferSize()\n{\n    Int bufSize = 0;\n\n    //PVGetVBVSize(&iEncoderControl,&bufSize);\n    PVGetMaxVideoFrameSize(&iEncoderControl, &bufSize);\n\n    return (int32) bufSize;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF int32 CPVM4VEncoder::GetEncodeWidth(int32 aLayer)\n{\n    return (int32)iEncWidth[aLayer];\n}\n\nOSCL_EXPORT_REF int32 CPVM4VEncoder::GetEncodeHeight(int32 aLayer)\n{\n    return (int32)iEncHeight[aLayer];\n}\n\nOSCL_EXPORT_REF float CPVM4VEncoder::GetEncodeFrameRate(int32 aLayer)\n{\n    return iEncFrameRate[aLayer];\n}\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::GetVolHeader(uint8 *volHeader, int32 *size, int32 layer)\n{\n    Int aSize, aLayer = layer;\n\n    if (iInitialized == false) /* has to be initialized first */\n        return ECVEI_FAIL;\n\n    aSize = *size;\n    if (PVGetVolHeader(&iEncoderControl, (UChar*)volHeader, &aSize, aLayer) == PV_FALSE)\n        return ECVEI_FAIL;\n\n    *size = aSize;\n    return ECVEI_SUCCESS;\n}\n\n#ifdef PVAUTHOR_PROFILING\n#include \"pvauthorprofile.h\"\n#endif\n\n/* ///////////////////////////////////////////////////////////////////////// */\n// Value of aRemainingBytes is relevant when overrun buffer is used and return value is ECVEI_MORE_OUTPUT\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::EncodeFrame(TPVVideoInputData  *aVidIn, TPVVideoOutputData *aVidOut, int *aRemainingBytes\n#ifdef PVAUTHOR_PROFILING\n        , void *aParam1\n#endif\n                                                       )\n{\n\n    Bool status;\n    Int Size;\n    Int nLayer = 0;\n    ULong 
modTime;\n    VideoEncFrameIO vid_in, vid_out;\n    *aRemainingBytes = 0;\n\n    if (iState == EEncode && iOverrunBuffer) // more output buffer to be copied out.\n    {\n        if (iOBSize > aVidOut->iBitStreamSize)\n        {\n            oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, aVidOut->iBitStreamSize);\n            iOBSize -= aVidOut->iBitStreamSize;\n            iOverrunBuffer += aVidOut->iBitStreamSize;\n            *aRemainingBytes = iOBSize;\n            return ECVEI_MORE_OUTPUT;\n        }\n        else\n        {\n            oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, iOBSize);\n            aVidOut->iBitStreamSize = iOBSize;\n            iOverrunBuffer = NULL;\n            iOBSize = 0;\n            iState = EIdle;\n            *aRemainingBytes = 0;\n            return ECVEI_SUCCESS;\n        }\n    }\n\n    if (aVidIn->iSource == NULL)\n    {\n        return ECVEI_FAIL;\n    }\n\n    if (aVidIn->iTimeStamp >= iNextModTime) /* time to encode */\n    {\n        iState = EIdle; /* stop current encoding */\n\n        Size = aVidOut->iBitStreamSize;\n\n#ifdef PVAUTHOR_PROFILING\n        if (aParam1)((CPVAuthorProfile*)aParam1)->Start();\n#endif\n\n        if (iVideoFormat == ECVEI_YUV420)\n#ifdef YUV_INPUT\n        {\n            if (iYUVIn) /* iSrcWidth or iSrcHeight is not multiple of 16 */\n            {\n                CopyToYUVIn(aVidIn->iSource, iSrcWidth, iSrcHeight,\n                ((iSrcWidth + 15) >> 4) << 4, ((iSrcHeight + 15) >> 4) << 4);\n                iVideoIn = iYUVIn;\n            }\n            else /* otherwise, we can just use aVidIn->iSource */\n            {\n                iVideoIn = aVidIn->iSource;\n            }\n        }\n#else\n            return ECVEI_FAIL;\n#endif\n        else if ((iVideoFormat == ECVEI_RGB12) || (iVideoFormat == ECVEI_RGB24) || (iVideoFormat == ECVEI_YUV420SEMIPLANAR))\n#if defined(RGB24_INPUT) || defined (RGB12_INPUT) || defined(YUV420SEMIPLANAR_INPUT)\n        {\n            
ccRGBtoYUV->Convert((uint8*)aVidIn->iSource, iYUVIn);\n            iVideoIn = iYUVIn;\n        }\n#else\n            return ECVEI_FAIL;\n#endif\n\n#ifdef PVAUTHOR_PROFILING\n        if (aParam1)((CPVAuthorProfile*)aParam1)->Stop(CPVAuthorProfile::EColorInput);\n#endif\n\n#ifdef PVAUTHOR_PROFILING\n        if (aParam1)((CPVAuthorProfile*)aParam1)->Start();\n#endif\n\n        /* with backward-P or B-Vop this timestamp must be re-ordered */\n        aVidOut->iExternalTimeStamp = aVidIn->iTimeStamp;\n        aVidOut->iVideoTimeStamp = aVidOut->iExternalTimeStamp;\n\n        vid_in.height = ((iSrcHeight + 15) >> 4) << 4;\n        vid_in.pitch = ((iSrcWidth + 15) >> 4) << 4;\n        vid_in.timestamp = aVidIn->iTimeStamp;\n        vid_in.yChan = (UChar*)iVideoIn;\n        vid_in.uChan = (UChar*)(iVideoIn + vid_in.height * vid_in.pitch);\n        vid_in.vChan = vid_in.uChan + ((vid_in.height * vid_in.pitch) >> 2);\n\n        status = PVEncodeVideoFrame(&iEncoderControl,\n                                    &vid_in, &vid_out, &modTime, (UChar*)aVidOut->iBitStream, &Size, &nLayer);\n\n        if (status == PV_TRUE)\n        {\n            iNextModTime = modTime;\n            aVidOut->iLayerNumber = nLayer;\n            aVidOut->iFrame = iVideoOut = (uint8*)vid_out.yChan;\n            aVidOut->iVideoTimeStamp = vid_out.timestamp;\n\n            PVGetHintTrack(&iEncoderControl, &aVidOut->iHintTrack);\n\n#ifdef PVAUTHOR_PROFILING\n            if (aParam1)((CPVAuthorProfile*)aParam1)->Stop(CPVAuthorProfile::EVideoEncode);\n#endif\n\n            iOverrunBuffer = PVGetOverrunBuffer(&iEncoderControl);\n            if (iOverrunBuffer != NULL && nLayer != -1)\n            {\n                oscl_memcpy(aVidOut->iBitStream, iOverrunBuffer, aVidOut->iBitStreamSize);\n                iOBSize = Size - aVidOut->iBitStreamSize;\n                iOverrunBuffer += aVidOut->iBitStreamSize;\n                iState = EEncode;\n                return ECVEI_MORE_OUTPUT;\n            }\n          
  else\n            {\n                aVidOut->iBitStreamSize = Size;\n                return ECVEI_SUCCESS;\n            }\n        }\n        else\n            return ECVEI_FAIL;\n    }\n    else /* if(aVidIn->iTimeStamp >= iNextModTime) */\n    {\n        aVidOut->iLayerNumber = -1;\n        aVidOut->iBitStreamSize = 0;\n#ifdef PVAUTHOR_PROFILING\n        if (aParam1)((CPVAuthorProfile*)aParam1)->AddVal\n            (CPVAuthorProfile::EVidSkip, iNextModTime - aVidIn->iTimeStamp);\n#endif\n        return ECVEI_SUCCESS;\n    }\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::FlushOutput(TPVVideoOutputData *aVidOut)\n{\n    OSCL_UNUSED_ARG(aVidOut);\n    return ECVEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nTCVEI_RETVAL CPVM4VEncoder::Terminate()\n{\n    iState = EIdle; /* stop current encoding */\n    if (iInitialized == true)\n    {\n        PVCleanUpVideoEncoder(&iEncoderControl);\n        iInitialized = false;\n\n    }\n\n    if (iYUVIn)\n    {\n        oscl_free(iYUVIn);\n        iYUVIn = NULL;\n    }\n    return ECVEI_SUCCESS;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateBitRate(int32 aNumLayer, int32 *aBitRate)\n{\n#ifndef LIMITED_API\n    Int i, bitRate[2] = {0, 0};\n\n    for (i = 0; i < aNumLayer; i++)\n    {\n        bitRate[i] = aBitRate[i];\n    }\n\n    if (PVUpdateBitRate(&iEncoderControl, &bitRate[0]) == PV_TRUE)\n        return ECVEI_SUCCESS;\n    else\n#endif\n        return ECVEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateFrameRate(int32 aNumLayer, float *aFrameRate)\n{\n    OSCL_UNUSED_ARG(aNumLayer);\n#ifndef LIMITED_API\n    if (PVUpdateEncFrameRate(&iEncoderControl, aFrameRate) == PV_TRUE)\n        return 
ECVEI_SUCCESS;\n    else\n#else\n    OSCL_UNUSED_ARG(aFrameRate);\n#endif\n        return ECVEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::UpdateIFrameInterval(int32 aIFrameInterval)\n{\n#ifndef LIMITED_API\n    if (PVUpdateIFrameInterval(&iEncoderControl, (Int)aIFrameInterval) == PV_TRUE)\n        return ECVEI_SUCCESS;\n    else\n#endif\n        return ECVEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::IFrameRequest()\n{\n#ifndef LIMITED_API\n    if (PVIFrameRequest(&iEncoderControl) == PV_TRUE)\n        return ECVEI_SUCCESS;\n    else\n#endif\n        return ECVEI_FAIL;\n}\n\n/* ///////////////////////////////////////////////////////////////////////// */\nOSCL_EXPORT_REF TCVEI_RETVAL CPVM4VEncoder::SetIntraMBRefresh(int32 aNumMBRefresh)\n{\n#ifndef LIMITED_API\n    if (PVUpdateNumIntraMBRefresh(&iEncoderControl, aNumMBRefresh) == PV_TRUE)\n        return ECVEI_SUCCESS;\n    else\n#endif\n        return ECVEI_FAIL;\n}\n\n#ifdef YUV_INPUT\n/* ///////////////////////////////////////////////////////////////////////// */\n/* Copy from YUV input to YUV frame inside M4VEnc lib                       */\n/* When input is not YUV, the color conv will write it directly to iVideoInOut. */\n/* ///////////////////////////////////////////////////////////////////////// */\n\nvoid CPVM4VEncoder::CopyToYUVIn(uint8 *YUV, Int width, Int height, Int width_16, Int height_16)\n{\n    UChar *y, *u, *v, *yChan, *uChan, *vChan;\n    Int y_ind, ilimit, jlimit, i, j, ioffset;\n    Int size = width * height;\n    Int size16 = width_16 * height_16;\n\n    /* do padding at the bottom first */\n    /* do padding if input RGB size(height) is different from the output YUV size(height_16) */\n    if (height < height_16 || width < width_16) /* if padding */\n    {\n        Int offset = (height < height_16) ? 
height : height_16;\n\n        offset = (offset * width_16);\n\n        if (width < width_16)\n        {\n            offset -= (width_16 - width);\n        }\n\n        yChan = (UChar*)(iYUVIn + offset);\n        oscl_memset(yChan, 16, size16 - offset); /* pad with zeros */\n\n        uChan = (UChar*)(iYUVIn + size16 + (offset >> 2));\n        oscl_memset(uChan, 128, (size16 - offset) >> 2);\n\n        vChan = (UChar*)(iYUVIn + size16 + (size16 >> 2) + (offset >> 2));\n        oscl_memset(vChan, 128, (size16 - offset) >> 2);\n    }\n\n    /* then do padding on the top */\n    yChan = (UChar*)iYUVIn; /* Normal order */\n    uChan = (UChar*)(iYUVIn + size16);\n    vChan = (UChar*)(uChan + (size16 >> 2));\n\n    u = (UChar*)(&(YUV[size]));\n    v = (UChar*)(&(YUV[size*5/4]));\n\n    /* To center the output */\n    if (height_16 > height)   /* output taller than input */\n    {\n        if (width_16 >= width)  /* output wider than or equal input */\n        {\n            i = ((height_16 - height) >> 1) * width_16 + (((width_16 - width) >> 3) << 2);\n            /* make sure that (width_16-width)>>1 is divisible by 4 */\n            j = ((height_16 - height) >> 2) * (width_16 >> 1) + (((width_16 - width) >> 4) << 2);\n            /* make sure that (width_16-width)>>2 is divisible by 4 */\n        }\n        else  /* output narrower than input */\n        {\n            i = ((height_16 - height) >> 1) * width_16;\n            j = ((height_16 - height) >> 2) * (width_16 >> 1);\n            YUV += ((width - width_16) >> 1);\n            u += ((width - width_16) >> 2);\n            v += ((width - width_16) >> 2);\n        }\n        oscl_memset((uint8 *)yChan, 16, i);\n        yChan += i;\n        oscl_memset((uint8 *)uChan, 128, j);\n        uChan += j;\n        oscl_memset((uint8 *)vChan, 128, j);\n        vChan += j;\n    }\n    else   /* output shorter or equal input */\n    {\n        if (width_16 >= width)   /* output wider or equal input */\n        {\n            
i = (((width_16 - width) >> 3) << 2);\n            /* make sure that (width_16-width)>>1 is divisible by 4 */\n            j = (((width_16 - width) >> 4) << 2);\n            /* make sure that (width_16-width)>>2 is divisible by 4 */\n            YUV += (((height - height_16) >> 1) * width);\n            u += (((height - height_16) >> 1) * width) >> 2;\n            v += (((height - height_16) >> 1) * width) >> 2;\n        }\n        else  /* output narrower than input */\n        {\n            i = 0;\n            j = 0;\n            YUV += (((height - height_16) >> 1) * width + ((width - width_16) >> 1));\n            u += (((height - height_16) >> 1) * width + ((width - width_16) >> 1)) >> 2;\n            v += (((height - height_16) >> 1) * width + ((width - width_16) >> 1)) >> 2;\n        }\n        oscl_memset((uint8 *)yChan, 16, i);\n        yChan += i;\n        oscl_memset((uint8 *)uChan, 128, j);\n        uChan += j;\n        oscl_memset((uint8 *)vChan, 128, j);\n        vChan += j;\n    }\n\n    /* Copy with cropping or zero-padding */\n    if (height < height_16)\n        jlimit = height;\n    else\n        jlimit = height_16;\n\n    if (width < width_16)\n    {\n        ilimit = width;\n        ioffset = width_16 - width;\n    }\n    else\n    {\n        ilimit = width_16;\n        ioffset = 0;\n    }\n\n    /* Copy Y */\n    /* Set up pointer for fast looping */\n    y = (UChar*)YUV;\n\n    if (width == width_16 && height == height_16) /* no need to pad */\n    {\n        oscl_memcpy(yChan, y, size);\n    }\n    else\n    {\n        for (y_ind = 0; y_ind < (jlimit - 1) ; y_ind++)\n        {\n            oscl_memcpy(yChan, y, ilimit);\n            oscl_memset(yChan + ilimit, 16, ioffset); /* pad with zero */\n            yChan += width_16;\n            y += width;\n        }\n        oscl_memcpy(yChan, y, ilimit); /* last line no padding */\n    }\n    /* Copy U and V */\n    /* Set up pointers for fast looping */\n    if (width == width_16 && height == 
height_16) /* no need to pad */\n    {\n        oscl_memcpy(uChan, u, size >> 2);\n        oscl_memcpy(vChan, v, size >> 2);\n    }\n    else\n    {\n        for (y_ind = 0; y_ind < (jlimit >> 1) - 1; y_ind++)\n        {\n            oscl_memcpy(uChan, u, ilimit >> 1);\n            oscl_memcpy(vChan, v, ilimit >> 1);\n            oscl_memset(uChan + (ilimit >> 1), 128, ioffset >> 1);\n            oscl_memset(vChan + (ilimit >> 1), 128, ioffset >> 1);\n            uChan += (width_16 >> 1);\n            u += (width >> 1);\n            vChan += (width_16 >> 1);\n            v += (width >> 1);\n        }\n        oscl_memcpy(uChan, u, ilimit >> 1); /* last line no padding */\n        oscl_memcpy(vChan, v, ilimit >> 1);\n    }\n\n    return ;\n}\n#endif\n\n#ifdef FOR_3GPP_COMPLIANCE\nvoid CPVM4VEncoder::Check3GPPCompliance(TPVVideoEncodeParam *aEncParam, Int *aEncWidth, Int *aEncHeight)\n{\n\n//MPEG-4 Simple profile and level 0\n#define MAX_BITRATE 64000\n#define MAX_FRAMERATE 15\n#define MAX_WIDTH 176\n#define MAX_HEIGHT 144\n#define MAX_BUFFERSIZE 163840\n\n    // check bitrate, framerate, video size and vbv buffer\n    if (aEncParam->iBitRate[0] > MAX_BITRATE) aEncParam->iBitRate[0] = MAX_BITRATE;\n    if (aEncParam->iFrameRate[0] > MAX_FRAMERATE) aEncParam->iFrameRate[0] = MAX_FRAMERATE;\n    if (aEncWidth[0] > MAX_WIDTH) aEncWidth[0] = MAX_WIDTH;\n    if (aEncHeight[0] > MAX_HEIGHT) aEncHeight[0] = MAX_HEIGHT;\n    if (aEncParam->iBitRate[0]*aEncParam->iBufferDelay > MAX_BUFFERSIZE)\n        aEncParam->iBufferDelay = (float)MAX_BUFFERSIZE / aEncParam->iBitRate[0];\n}\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/rate_control.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n#include \"rate_control.h\"\n#include \"mp4enc_lib.h\"\n#include \"bitstream_io.h\"\n#include \"m4venc_oscl.h\"\n\nvoid targetBitCalculation(void *input);\nvoid calculateQuantizer_Multipass(void *video);\nvoid updateRateControl(rateControl *rc, VideoEncData *video);\nvoid updateRC_PostProc(rateControl *rc, VideoEncData *video);\n\n/***************************************************************************\n**************  RC APIs to core encoding modules  *******************\n\nPV_STATUS RC_Initialize(void *video);\nPV_STATUS RC_Cleanup(rateControl *rc[],Int numLayers);\nPV_STATUS RC_VopQPSetting(VideoEncData *video,rateControl *rc[]);\nPV_STATUS RC_VopUpdateStat(VideoEncData *video,rateControl *rc[]);\nPV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip);\nInt       RC_GetSkipNextFrame(VideoEncData *video,Int currLayer);\nvoid      RC_ResetSkipNextFrame(void *video,Int currLayer);\n\nPV_STATUS RC_UpdateBXRCParams(void *input);  Parameters update for target bitrate or framerate 
change\n\n****************************************************************************/\n\n\n/************************************************************************/\n/************ API part **************************************************/\n/* must be called before each sequence*/\n\nPV_STATUS RC_Initialize(void *input)\n{\n    VideoEncData *video = (VideoEncData *) input;\n    VideoEncParams *encParams = video->encParams;\n    rateControl **rc = video->rc;\n    Int numLayers = encParams->nLayers;\n    Int *LayerBitRate = encParams->LayerBitRate;\n    float *LayerFrameRate = encParams->LayerFrameRate;\n    MultiPass **pMP = video->pMP;\n\n    Int n;\n\n    for (n = 0; n < numLayers; n++)\n    {\n        /* rate control */\n        rc[n]->fine_frame_skip = encParams->FineFrameSkip_Enabled;\n        rc[n]->no_frame_skip = encParams->NoFrameSkip_Enabled;\n        rc[n]->no_pre_skip = encParams->NoPreSkip_Enabled;\n        rc[n]->skip_next_frame = 0; /* must be initialized */\n\n        //rc[n]->TMN_TH = (Int)((float)LayerBitRate[n]/LayerFrameRate[n]);\n        rc[n]->Bs = video->encParams->BufferSize[n];\n        rc[n]->TMN_W = 0;\n        rc[n]->VBV_fullness = (Int)(rc[n]->Bs * 0.5); /* rc[n]->Bs */\n        rc[n]->encoded_frames = 0;\n        rc[n]->framerate = LayerFrameRate[n];\n        if (n == 0)\n        {\n            rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);\n            rc[n]->bitrate = LayerBitRate[n];\n            rc[n]->framerate = LayerFrameRate[n];\n\n            // For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)\n            if (video->encParams->H263_Enabled)\n            {\n                rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;\n                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;\n            }\n            else   // MPEG-4 normal modes\n          
  {\n                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) / ((float)LayerBitRate[n] / LayerFrameRate[n] / 10.0)) - 5;\n                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;\n            }\n        }\n        else\n        {\n            if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /*  7/31/03 */\n            {\n                rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));\n                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - rc[n]->VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;\n                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;\n            }\n            else   /*  7/31/03 */\n            {\n                rc[n]->TMN_TH = 1 << 30;\n                rc[n]->max_BitVariance_num = 0;\n            }\n            rc[n]->bitrate = LayerBitRate[n] - LayerBitRate[n-1];\n            rc[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];\n        }\n\n        // Set the initial buffer fullness\n        if (1) //!video->encParams->H263_Enabled)  { // MPEG-4\n        {\n            /* According to the spec, the initial buffer fullness needs to be set to 1/3 */\n            rc[n]->VBV_fullness = (Int)(rc[n]->Bs / 3.0 - rc[n]->Bs / 2.0); /* the buffer range is [-Bs/2, Bs/2] */\n            pMP[n]->counter_BTsrc = (Int)((rc[n]->Bs / 2.0 - rc[n]->Bs / 3.0) / (rc[n]->bitrate / rc[n]->framerate / 10.0));\n            rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness + pMP[n]->counter_BTsrc * (rc[n]->bitrate / rc[n]->framerate / 10.0));\n\n            rc[n]->low_bound = -rc[n]->Bs / 2;\n            rc[n]-> VBV_fullness_offset = 0;\n        }\n        else   /* this part doesn't work in some cases, the low_bound is too high, Jan 4,2006 */\n        {\n            rc[n]->VBV_fullness =  rc[n]->Bs - (Int)(video->encParams->VBV_delay * rc[n]->bitrate);\n            if (rc[n]->VBV_fullness < 0) 
rc[n]->VBV_fullness = 0;\n            //rc[n]->VBV_fullness = (rc[n]->Bs-video->encParams->maxFrameSize)/2 + video->encParams->maxFrameSize;\n\n            rc[n]->VBV_fullness -= rc[n]->Bs / 2; /* the buffer range is [-Bs/2, Bs/2] */\n            rc[n]->low_bound = -rc[n]->Bs / 2 + video->encParams->maxFrameSize;  /*  too high */\n            rc[n]->VBV_fullness_offset = video->encParams->maxFrameSize / 2; /*  don't understand the meaning of this */\n            pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;\n\n        }\n\n        /* Setting the bitrate and framerate */\n        pMP[n]->bitrate = rc[n]->bitrate;\n        pMP[n]->framerate = rc[n]->framerate;\n        pMP[n]->target_bits_per_frame = pMP[n]->bitrate / pMP[n]->framerate;\n\n    }\n\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : RC_Cleanup                                                   */\n/*  Date     : 12/20/2000                                                   */\n/*  Purpose  : free Rate Control memory                                     */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\n\nPV_STATUS RC_Cleanup(rateControl *rc[], Int numLayers)\n{\n    OSCL_UNUSED_ARG(rc);\n    OSCL_UNUSED_ARG(numLayers);\n\n    return PV_SUCCESS;\n}\n\n\n\n/* ======================================================================== */\n/*  Function : RC_VopQPSetting                                              */\n/*  Date     : 4/11/2001                                                    */\n/*  Purpose  : Reset rate control before coding VOP, moved from vop.c       */\n/*              Compute QP for the whole VOP and initialize MB-based RC\n                
reset QPMB[], currVop->quantizer, rc->Ec, video->header_bits */\n/* to          In order to  work RC_VopQPSetting has to do the followings\n                1. Set video->QPMB of all macroblocks.\n                2. Set currVop->quantizer\n                3. Reset video->header_bits to zero.\n                4. Initialize internal RC parameters for Vop cooding        */\n/*  In/out   :                                                              */\n/*  Return   : PV_STATUS                                                    */\n/*  Modified :                                                              */\n/* ======================================================================== */\n/* To be moved to rate_control.c and separate between BX_RC and ANNEX_L     */\n\nPV_STATUS RC_VopQPSetting(VideoEncData *video, rateControl *prc[])\n{\n    Int currLayer = video->currLayer;\n    Vol *currVol = video->vol[currLayer];\n    Vop *currVop = video->currVop;\n#ifdef TEST_MBBASED_QP\n    int i;\n#endif\n\n    rateControl *rc = video->rc[currLayer];\n    MultiPass *pMP = video->pMP[currLayer];\n\n    OSCL_UNUSED_ARG(prc);\n\n    if (video->encParams->RC_Type == CONSTANT_Q)\n    {\n        M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);\n        return PV_SUCCESS;\n    }\n    else\n    {\n\n        if (video->rc[currLayer]->encoded_frames == 0) /* rc[currLayer]->totalFrameNumber*/\n        {\n            M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);\n            video->rc[currLayer]->Qc = video->encParams->InitQuantIvop[currLayer];\n        }\n        else\n        {\n            calculateQuantizer_Multipass((void*) video);\n            currVop->quantizer = video->rc[currLayer]->Qc;\n#ifdef TEST_MBBASED_QP\n            i = currVol->nTotalMB;  /* testing changing QP at MB level */\n            while (i)\n            {\n                i--;\n                video->QPMB[i] = (i & 1) ? 
currVop->quantizer - 1 : currVop->quantizer + 1;\n            }\n#else\n            M4VENC_MEMSET(video->QPMB, currVop->quantizer, sizeof(UChar)*currVol->nTotalMB);\n#endif\n        }\n\n        video->header_bits = 0;\n    }\n\n    /* update pMP->framePos */\n    if (++pMP->framePos == pMP->frameRange) pMP->framePos = 0;\n\n    if (rc->T == 0)\n    {\n        pMP->counter_BTdst = (Int)(video->encParams->LayerFrameRate[video->currLayer] * 7.5 + 0.5); /* 0.75s time frame */\n        pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, (Int)(rc->max_BitVariance_num / 2 * 0.40)); /* 0.75s time frame may go beyond VBV buffer if we set the buffer size smaller than 0.75s */\n        pMP->counter_BTdst = PV_MAX(pMP->counter_BTdst, (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.30 / (rc->TMN_TH / 10.0) + 0.5)); /* At least 30% of VBV buffer size/2 */\n        pMP->counter_BTdst = PV_MIN(pMP->counter_BTdst, 20); /* Limit the target to be smaller than 3C */\n\n        pMP->target_bits = rc->T = rc->TMN_TH = (Int)(rc->TMN_TH * (1.0 + pMP->counter_BTdst * 0.1));\n        pMP->diff_counter = pMP->counter_BTdst;\n    }\n\n    /* collect the necessary data: target bits, actual bits, mad and QP */\n    pMP->target_bits = rc->T;\n    pMP->QP  = currVop->quantizer;\n\n    pMP->mad = video->sumMAD / (float)currVol->nTotalMB;\n    if (pMP->mad < MAD_MIN) pMP->mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n\n    pMP->bitrate = rc->bitrate; /* calculated in RCVopQPSetting */\n    pMP->framerate = rc->framerate;\n\n    /* first pass encoding */\n    pMP->nRe_Quantized = 0;\n\n    return  PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : SaveRDSamples()                                              */\n/*  Date     : 08/29/2001                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Save QP, actual_bits, mad and R_D of the current 
iteration   */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nVoid SaveRDSamples(MultiPass *pMP, Int counter_samples)\n{\n    /* for pMP->pRDSamples */\n    pMP->pRDSamples[pMP->framePos][counter_samples].QP    = pMP->QP;\n    pMP->pRDSamples[pMP->framePos][counter_samples].actual_bits = pMP->actual_bits;\n    pMP->pRDSamples[pMP->framePos][counter_samples].mad   = pMP->mad;\n    pMP->pRDSamples[pMP->framePos][counter_samples].R_D = (float)(pMP->actual_bits / (pMP->mad + 0.0001));\n\n    return ;\n}\n/* ======================================================================== */\n/*  Function : RC_VopUpdateStat                                             */\n/*  Date     : 12/20/2000                                                   */\n/*  Purpose  : Update statistics for rate control after encoding each VOP.  */\n/*             No need to change anything in VideoEncData structure.        
*/\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS RC_VopUpdateStat(VideoEncData *video, rateControl *rc)\n{\n    Int currLayer = video->currLayer;\n    Vol *currVol = video->vol[currLayer];\n    MultiPass *pMP = video->pMP[currLayer];\n    Int diff_BTCounter;\n\n    switch (video->encParams->RC_Type)\n    {\n        case CONSTANT_Q:\n            break;\n\n        case CBR_1:\n        case CBR_2:\n        case VBR_1:\n        case VBR_2:\n        case CBR_LOWDELAY:\n\n            pMP->actual_bits = currVol->stream->byteCount << 3;\n\n            SaveRDSamples(pMP, 0);\n\n            pMP->encoded_frames++;\n\n            /* for pMP->samplesPerFrame */\n            pMP->samplesPerFrame[pMP->framePos] = 0;\n\n            pMP->sum_QP += pMP->QP;\n\n\n            /* update pMP->counter_BTsrc, pMP->counter_BTdst */\n            /* re-allocate the target bit again and then stop encoding */\n            diff_BTCounter = (Int)((float)(rc->TMN_TH - rc->TMN_W - pMP->actual_bits) /\n                                   (pMP->bitrate / (pMP->framerate + 0.0001) + 0.0001) / 0.1);\n            if (diff_BTCounter >= 0)\n                pMP->counter_BTsrc += diff_BTCounter; /* pMP->actual_bits is smaller */\n            else\n                pMP->counter_BTdst -= diff_BTCounter; /* pMP->actual_bits is bigger */\n\n            rc->TMN_TH -= (Int)((float)pMP->bitrate / (pMP->framerate + 0.0001) * (diff_BTCounter * 0.1));\n            rc->T = pMP->target_bits = rc->TMN_TH - rc->TMN_W;\n            pMP->diff_counter -= diff_BTCounter;\n\n            rc->Rc = currVol->stream->byteCount << 3;   /* Total Bits for current frame */\n            rc->Hc = video->header_bits;    /* Total Bits in Header and Motion 
Vector */\n\n            /* BX_RC */\n            updateRateControl(rc, video);\n\n            break;\n\n        default: /* for case CBR_1/2, VBR_1/2 */\n\n            return PV_FAIL;\n    }\n\n\n    return PV_SUCCESS;\n}\n\n/* ======================================================================== */\n/*  Function : RC_GetSkipNextFrame, RC_GetRemainingVops                     */\n/*  Date     : 2/20/2001                                                    */\n/*  Purpose  : To access RC parameters from other parts of the code.        */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nInt RC_GetSkipNextFrame(VideoEncData *video, Int currLayer)\n{\n    return video->rc[currLayer]->skip_next_frame;\n}\n\nvoid RC_ResetSkipNextFrame(VideoEncData *video, Int currLayer)\n{\n\n    video->rc[currLayer]->skip_next_frame = 0;\n    return ;\n}\n\n/* ======================================================================== */\n/*  Function : RC_UpdateBuffer                                      */\n/*  Date     : 2/20/2001                                                    */\n/*  Purpose  : Update RC in case of there are frames skipped (camera freeze)*/\n/*              from the application level in addition to what RC requested */\n/*  In/out   : Nr, B, Rr                                                    */\n/*  Return   : Void                                                         */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\n\nPV_STATUS RC_UpdateBuffer(VideoEncData *video, Int currLayer, Int num_skip)\n{\n    rateControl *rc  = video->rc[currLayer];\n    MultiPass   *pMP = 
video->pMP[currLayer];\n\n    if (video == NULL || rc == NULL || pMP == NULL)\n        return PV_FAIL;\n\n    rc->VBV_fullness   -= (Int)(rc->bitrate / rc->framerate * num_skip); //rc[currLayer]->Rp;\n    pMP->counter_BTsrc += 10 * num_skip;\n\n    /* Check buffer underflow */\n    if (rc->VBV_fullness < rc->low_bound)\n    {\n        rc->VBV_fullness = rc->low_bound; // -rc->Bs/2;\n        rc->TMN_W = rc->VBV_fullness - rc->low_bound;\n        pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n    }\n\n    return PV_SUCCESS;\n}\n\n\n/* ======================================================================== */\n/*  Function : RC_UpdateBXRCParams                                          */\n/*  Date     : 4/08/2002                                                    */\n/*  Purpose  : Update RC parameters specifically for target bitrate or      */\n/*             framerate update during an encoding session                  */\n/*  In/out   :                                                              */\n/*  Return   : PV_TRUE if successed, PV_FALSE if failed.                    
*/\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS RC_UpdateBXRCParams(void *input)\n{\n    VideoEncData *video = (VideoEncData *) input;\n    VideoEncParams *encParams = video->encParams;\n    rateControl **rc = video->rc;\n    Int numLayers = encParams->nLayers;\n    Int *LayerBitRate = encParams->LayerBitRate;\n    float *LayerFrameRate = encParams->LayerFrameRate;\n    MultiPass **pMP = video->pMP;\n\n    Int n, VBV_fullness;\n    Int diff_counter;\n\n    extern Bool SetProfile_BufferSize(VideoEncData *video, float delay, Int bInitialized);\n\n\n    /* Reset video buffer size due to target bitrate change */\n    SetProfile_BufferSize(video, video->encParams->VBV_delay, 0); /* output: video->encParams->BufferSize[] */\n\n    for (n = 0; n < numLayers; n++)\n    {\n        /* Remaining stuff about frame dropping and underflow check in update RC */\n        updateRC_PostProc(rc[n], video);\n        rc[n]->skip_next_frame = 0; /* must be initialized */\n\n        /* New changes: bitrate and framerate, Bs, max_BitVariance_num, TMN_TH(optional), encoded_frames(optional) */\n        rc[n]->Bs = video->encParams->BufferSize[n];\n        VBV_fullness = (Int)(rc[n]->Bs * 0.5);\n\n        if (n == 0)\n        {\n            rc[n]->TMN_TH = (Int)((float)LayerBitRate[n] / LayerFrameRate[n]);\n            rc[n]->bitrate   = pMP[n]->bitrate   = LayerBitRate[n];\n            rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n];\n\n            // For h263 or short header mode, the bit variation is within (-2*Rmax*1001/3000, 2*Rmax*1001/3000)\n            if (video->encParams->H263_Enabled)\n            {\n                rc[n]->max_BitVariance_num = (Int)((rc[n]->Bs - video->encParams->maxFrameSize) / 2 / (rc[n]->bitrate / rc[n]->framerate / 10.0)) - 5;\n                //rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - 
rc[n]->VBV_fullness)/((float)LayerBitRate[n]/LayerFrameRate[n]/10.0))-5;\n            }\n            else   // MPEG-4 normal modes\n            {\n                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)LayerBitRate[n] / LayerFrameRate[n])) - 5;\n            }\n        }\n        else\n        {\n            if (LayerFrameRate[n] - LayerFrameRate[n-1] > 0) /*  7/31/03 */\n            {\n                rc[n]->TMN_TH = (Int)((float)(LayerBitRate[n] - LayerBitRate[n-1]) / (LayerFrameRate[n] - LayerFrameRate[n-1]));\n                rc[n]->max_BitVariance_num = (Int)((float)(rc[n]->Bs - VBV_fullness) * 10 / ((float)rc[n]->TMN_TH)) - 5;\n                if (rc[n]->max_BitVariance_num < 0) rc[n]->max_BitVariance_num += 5;\n            }\n            else   /*  7/31/03 */\n            {\n                rc[n]->TMN_TH = 1 << 30;\n                rc[n]->max_BitVariance_num = 0;\n            }\n            rc[n]->bitrate   = pMP[n]->bitrate   = LayerBitRate[n] - LayerBitRate[n-1];\n            rc[n]->framerate = pMP[n]->framerate = LayerFrameRate[n] - LayerFrameRate[n-1];\n        }\n\n        pMP[n]->target_bits_per_frame_prev = pMP[n]->target_bits_per_frame;\n        pMP[n]->target_bits_per_frame = pMP[n]->bitrate / (float)(pMP[n]->framerate + 0.0001);  /*  7/31/03 */\n\n        /* rc[n]->VBV_fullness and rc[n]->TMN_W should be kept same */\n        /* update pMP[n]->counter_BTdst and pMP[n]->counter_BTsrc   */\n        diff_counter = (Int)((float)(rc[n]->VBV_fullness - rc[n]->TMN_W) /\n                             (pMP[n]->target_bits_per_frame / 10 + 0.0001)); /*  7/31/03 */\n\n        pMP[n]->counter_BTdst = pMP[n]->counter_BTsrc = 0;\n        if (diff_counter > 0)\n            pMP[n]->counter_BTdst = diff_counter;\n\n        else if (diff_counter < 0)\n            pMP[n]->counter_BTsrc = -diff_counter;\n\n        rc[n]->TMN_W = (Int)(rc[n]->VBV_fullness -      /* re-calculate rc[n]->TMN_W in order for higher accuracy */\n    
                         (pMP[n]->target_bits_per_frame / 10) * (pMP[n]->counter_BTdst - pMP[n]->counter_BTsrc));\n\n        /* Keep the current average mad */\n        if (pMP[n]->aver_mad != 0)\n        {\n            pMP[n]->aver_mad_prev = pMP[n]->aver_mad;\n            pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;\n        }\n\n        pMP[n]->aver_mad = 0;\n        pMP[n]->overlapped_win_size = 4;\n\n        /* Misc */\n        pMP[n]->sum_mad = pMP[n]->sum_QP = 0;\n        //pMP[n]->encoded_frames_prev = pMP[n]->encoded_frames;\n        pMP[n]->encoded_frames = pMP[n]->re_encoded_frames = pMP[n]->re_encoded_times = 0;\n\n    } /* end of: for(n=0; n<numLayers; n++) */\n\n    return PV_SUCCESS;\n\n}\n\n\n/* ================================================================================ */\n/*  Function : targetBitCalculation                                                 */\n/*  Date     : 10/01/2001                                                           */\n/*  Purpose  : quadratic bit allocation model: T(n) = C*sqrt(mad(n)/aver_mad(n-1))  */\n/*                                                                                  */\n/*  In/out   : rc->T                                                                */\n/*  Return   : Void                                                                 */\n/*  Modified :                                                                      */\n/* ================================================================================ */\n\nvoid targetBitCalculation(void *input)\n{\n    VideoEncData *video = (VideoEncData *) input;\n    MultiPass *pMP = video->pMP[video->currLayer];\n    Vol *currVol = video->vol[video->currLayer];\n    rateControl *rc = video->rc[video->currLayer];\n\n    float curr_mad;//, average_mad;\n    Int diff_counter_BTsrc, diff_counter_BTdst, prev_counter_diff, curr_counter_diff, bound;\n    /* BT = Bit Transfer, for pMP->counter_BTsrc, pMP->counter_BTdst */\n\n    if (video == NULL 
|| currVol == NULL || pMP == NULL || rc == NULL)\n        return;\n\n    /* some stuff about frame dropping remained here to be done because pMP cannot be inserted into updateRateControl()*/\n    updateRC_PostProc(rc, video);\n\n    /* update pMP->counter_BTsrc and pMP->counter_BTdst to avoid interger overflow */\n    if (pMP->counter_BTsrc > 1000 && pMP->counter_BTdst > 1000)\n    {\n        pMP->counter_BTsrc -= 1000;\n        pMP->counter_BTdst -= 1000;\n    }\n\n    /* ---------------------------------------------------------------------------------------------------*/\n    /* target calculation */\n    curr_mad = video->sumMAD / (float)currVol->nTotalMB;\n    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n    diff_counter_BTsrc = diff_counter_BTdst = 0;\n    pMP->diff_counter = 0;\n\n\n    /*1.calculate average mad */\n    pMP->sum_mad += curr_mad;\n    //average_mad = (pMP->encoded_frames < 1 ? curr_mad : pMP->sum_mad/(float)(pMP->encoded_frames+1)); /* this function is called from the scond encoded frame*/\n    //pMP->aver_mad = average_mad;\n    if (pMP->encoded_frames >= 0) /* pMP->encoded_frames is set to -1 initially, so forget about the very first I frame */\n        pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames + curr_mad) / (pMP->encoded_frames + 1);\n\n    if (pMP->overlapped_win_size > 0 && pMP->encoded_frames_prev >= 0)  /*  7/31/03 */\n        pMP->aver_mad_prev = (pMP->aver_mad_prev * pMP->encoded_frames_prev + curr_mad) / (pMP->encoded_frames_prev + 1);\n\n    /*2.average_mad, mad ==> diff_counter_BTsrc, diff_counter_BTdst */\n    if (pMP->overlapped_win_size == 0)\n    {\n        /* original verison */\n        if (curr_mad > pMP->aver_mad*1.1)\n        {\n            if (curr_mad / (pMP->aver_mad + 0.0001) > 2)\n                diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.4) - 10;\n            //diff_counter_BTdst = 
(Int)((sqrt(curr_mad/pMP->aver_mad)*2+curr_mad/pMP->aver_mad)/(3*0.1) + 0.4) - 10;\n            else\n                diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad + 0.0001) * 10 + 0.4) - 10;\n        }\n        else /* curr_mad <= average_mad*1.1 */\n            //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad) + pow(curr_mad/pMP->aver_mad, 1.0/3.0))/(2.0*0.1) + 0.4);\n            diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad + 0.0001)) * 10 + 0.5);\n        //diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad/0.1 + 0.5)\n\n        /* actively fill in the possible gap */\n        if (diff_counter_BTsrc == 0 && diff_counter_BTdst == 0 &&\n                curr_mad <= pMP->aver_mad*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)\n            diff_counter_BTsrc = 1;\n\n    }\n    else if (pMP->overlapped_win_size > 0)\n    {\n        /* transition time: use previous average mad \"pMP->aver_mad_prev\" instead of the current average mad \"pMP->aver_mad\" */\n        if (curr_mad > pMP->aver_mad_prev*1.1)\n        {\n            if (curr_mad / pMP->aver_mad_prev > 2)\n                diff_counter_BTdst = (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.4) - 10;\n            //diff_counter_BTdst = (Int)((M4VENC_SQRT(curr_mad/pMP->aver_mad_prev)*2+curr_mad/pMP->aver_mad_prev)/(3*0.1) + 0.4) - 10;\n            else\n                diff_counter_BTdst = (Int)(curr_mad / (pMP->aver_mad_prev + 0.0001) * 10 + 0.4) - 10;\n        }\n        else /* curr_mad <= average_mad*1.1 */\n            //diff_counter_BTsrc = 10 - (Int)((sqrt(curr_mad/pMP->aver_mad_prev) + pow(curr_mad/pMP->aver_mad_prev, 1.0/3.0))/(2.0*0.1) + 0.4);\n            diff_counter_BTsrc = 10 - (Int)(M4VENC_SQRT(curr_mad / (pMP->aver_mad_prev + 0.0001)) * 10 + 0.5);\n        //diff_counter_BTsrc = 10 - (Int)(curr_mad/pMP->aver_mad_prev/0.1 + 0.5)\n\n        /* actively fill in the possible gap */\n        if (diff_counter_BTsrc == 0 && 
diff_counter_BTdst == 0 &&\n                curr_mad <= pMP->aver_mad_prev*1.1 && pMP->counter_BTsrc < pMP->counter_BTdst)\n            diff_counter_BTsrc = 1;\n\n        if (--pMP->overlapped_win_size <= 0)    pMP->overlapped_win_size = 0;\n    }\n\n\n    /* if difference is too much, do clipping */\n    /* First, set the upper bound for current bit allocation variance: 80% of available buffer */\n    bound = (Int)((rc->Bs / 2 - rc->VBV_fullness) * 0.6 / (pMP->target_bits_per_frame / 10)); /* rc->Bs */\n    diff_counter_BTsrc =  PV_MIN(diff_counter_BTsrc, bound);\n    diff_counter_BTdst =  PV_MIN(diff_counter_BTdst, bound);\n\n    /* Second, set another upper bound for current bit allocation: 4-5*bitrate/framerate */\n    bound = 50;\n//  if(video->encParams->RC_Type == CBR_LOWDELAY)\n//  not necessary       bound = 10;     /*  1/17/02 -- For Low delay */\n\n    diff_counter_BTsrc =  PV_MIN(diff_counter_BTsrc, bound);\n    diff_counter_BTdst =  PV_MIN(diff_counter_BTdst, bound);\n\n\n    /* Third, check the buffer */\n    prev_counter_diff = pMP->counter_BTdst - pMP->counter_BTsrc;\n    curr_counter_diff = prev_counter_diff + (diff_counter_BTdst - diff_counter_BTsrc);\n\n    if (PV_ABS(prev_counter_diff) >= rc->max_BitVariance_num || PV_ABS(curr_counter_diff) >= rc->max_BitVariance_num) // PV_ABS(curr_counter_diff) >= PV_ABS(prev_counter_diff) )\n    {   //diff_counter_BTsrc = diff_counter_BTdst = 0;\n\n        if (curr_counter_diff > rc->max_BitVariance_num && diff_counter_BTdst)\n        {\n            diff_counter_BTdst = (rc->max_BitVariance_num - prev_counter_diff) + diff_counter_BTsrc;\n            if (diff_counter_BTdst < 0) diff_counter_BTdst = 0;\n        }\n\n        else if (curr_counter_diff < -rc->max_BitVariance_num && diff_counter_BTsrc)\n        {\n            diff_counter_BTsrc = diff_counter_BTdst - (-rc->max_BitVariance_num - prev_counter_diff);\n            if (diff_counter_BTsrc < 0) diff_counter_BTsrc = 0;\n        }\n    }\n\n\n    
/*3.diff_counter_BTsrc, diff_counter_BTdst ==> TMN_TH */\n    //rc->TMN_TH = (Int)((float)pMP->bitrate/pMP->framerate);\n    rc->TMN_TH = (Int)(pMP->target_bits_per_frame);\n    pMP->diff_counter = 0;\n\n    if (diff_counter_BTsrc)\n    {\n        rc->TMN_TH -= (Int)(pMP->target_bits_per_frame * diff_counter_BTsrc * 0.1);\n        pMP->diff_counter = -diff_counter_BTsrc;\n    }\n    else if (diff_counter_BTdst)\n    {\n        rc->TMN_TH += (Int)(pMP->target_bits_per_frame * diff_counter_BTdst * 0.1);\n        pMP->diff_counter = diff_counter_BTdst;\n    }\n\n\n    /*4.update pMP->counter_BTsrc, pMP->counter_BTdst */\n    pMP->counter_BTsrc += diff_counter_BTsrc;\n    pMP->counter_BTdst += diff_counter_BTdst;\n\n\n    /*5.target bit calculation */\n    rc->T = rc->TMN_TH - rc->TMN_W;\n    //rc->T = rc->TMN_TH - (Int)((float)rc->TMN_W/rc->frameRate);\n\n    if (video->encParams->H263_Enabled && rc->T > video->encParams->maxFrameSize)\n    {\n        rc->T = video->encParams->maxFrameSize;  //  added this 11/07/05\n    }\n\n}\n\n/* ================================================================================ */\n/*  Function : calculateQuantizer_Multipass                                         */\n/*  Date     : 10/01/2001                                                           */\n/*  Purpose  : variable rate bit allocation + new QP determination scheme           */\n/*                                                                                  */\n/*  In/out   : rc->T and rc->Qc                                                     */\n/*  Return   : Void                                                                 */\n/*  Modified :                                                                      */\n/* ================================================================================ */\n\n/* Mad based variable bit allocation + QP calculation with a new quadratic method */\nvoid calculateQuantizer_Multipass(void *input)\n{\n    VideoEncData *video = 
(VideoEncData *) input;\n    MultiPass *pMP = video->pMP[video->currLayer];\n    Vol *currVol = video->vol[video->currLayer];\n    rateControl *rc = video->rc[video->currLayer];\n\n    Int prev_QP, prev_actual_bits, curr_target, i, j;\n\n    float curr_mad, prev_mad, curr_RD, prev_RD, average_mad, aver_QP;\n\n\n    if (video == NULL || currVol == NULL || pMP == NULL || rc == NULL)\n        return;\n\n    /* Mad based variable bit allocation */\n    targetBitCalculation((void*) video);\n\n    if (rc->T <= 0 || video->sumMAD == 0)\n    {\n        if (rc->T < 0)  rc->Qc = 31;\n        return;\n    }\n\n    /* ---------------------------------------------------------------------------------------------------*/\n    /* current frame QP estimation */\n    curr_target = rc->T;\n    curr_mad = video->sumMAD / (float)currVol->nTotalMB;\n    if (curr_mad < MAD_MIN) curr_mad = MAD_MIN; /* MAD_MIN is defined as 1 in mp4def.h */\n    curr_RD  = (float)curr_target / curr_mad;\n\n    /* Another version of search the optimal point */\n    prev_actual_bits = pMP->pRDSamples[0][0].actual_bits;\n    prev_mad = pMP->pRDSamples[0][0].mad;\n\n    for (i = 0, j = 0; i < pMP->frameRange; i++)\n    {\n        if (pMP->pRDSamples[i][0].mad != 0 && prev_mad != 0 &&\n                PV_ABS(prev_mad - curr_mad) > PV_ABS(pMP->pRDSamples[i][0].mad - curr_mad))\n        {\n            prev_mad = pMP->pRDSamples[i][0].mad;\n            prev_actual_bits = pMP->pRDSamples[i][0].actual_bits;\n            j = i;\n        }\n    }\n    prev_QP = pMP->pRDSamples[j][0].QP;\n    for (i = 1; i < pMP->samplesPerFrame[j]; i++)\n    {\n        if (PV_ABS(prev_actual_bits - curr_target) > PV_ABS(pMP->pRDSamples[j][i].actual_bits - curr_target))\n        {\n            prev_actual_bits = pMP->pRDSamples[j][i].actual_bits;\n            prev_QP = pMP->pRDSamples[j][i].QP;\n        }\n    }\n\n    // quadratic approximation\n    prev_RD = (float)prev_actual_bits / prev_mad;\n    //rc->Qc = (Int)(prev_QP * 
sqrt(prev_actual_bits/curr_target) + 0.4);\n    if (prev_QP == 1) // 11/14/05, added this to allow getting out of QP = 1 easily\n    {\n        rc->Qc = (Int)(prev_RD / curr_RD + 0.5);\n    }\n    else\n    {\n        rc->Qc = (Int)(prev_QP * M4VENC_SQRT(prev_RD / curr_RD) + 0.9);\n\n        if (prev_RD / curr_RD > 0.5 && prev_RD / curr_RD < 2.0)\n            rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + prev_RD / curr_RD) / 2.0 + 0.9); /* Quadratic and linear approximation */\n        else\n            rc->Qc = (Int)(prev_QP * (M4VENC_SQRT(prev_RD / curr_RD) + M4VENC_POW(prev_RD / curr_RD, 1.0 / 3.0)) / 2.0 + 0.9);\n    }\n    //rc->Qc =(Int)(prev_QP * sqrt(prev_RD/curr_RD) + 0.4);\n    // 11/08/05\n    // lower bound on Qc should be a function of curr_mad\n    // When mad is already low, lower bound on Qc doesn't have to be small.\n    // Note, this doesn't work well for low complexity clip encoded at high bit rate\n    // it doesn't hit the target bit rate due to this QP lower bound.\n/// if((curr_mad < 8) && (rc->Qc < 12)) rc->Qc = 12;\n//  else    if((curr_mad < 128) && (rc->Qc < 3)) rc->Qc = 3;\n\n    if (rc->Qc < 1) rc->Qc = 1;\n    if (rc->Qc > 31)    rc->Qc = 31;\n\n\n    /* active bit resource protection */\n    aver_QP = (pMP->encoded_frames == 0 ? 0 : pMP->sum_QP / (float)pMP->encoded_frames);\n    average_mad = (pMP->encoded_frames == 0 ? 
0 : pMP->sum_mad / (float)pMP->encoded_frames); /* this function is called from the scond encoded frame*/\n    if (pMP->diff_counter == 0 &&\n            ((float)rc->Qc <= aver_QP*1.1 || curr_mad <= average_mad*1.1) &&\n            pMP->counter_BTsrc <= (pMP->counter_BTdst + (Int)(pMP->framerate*1.0 + 0.5)))\n    {\n        rc->TMN_TH -= (Int)(pMP->target_bits_per_frame / 10.0);\n        rc->T = rc->TMN_TH - rc->TMN_W;\n        pMP->counter_BTsrc++;\n        pMP->diff_counter--;\n    }\n\n}\n\n\n/* ======================================================================== */\n/*  Function : updateRateControl                                            */\n/*  Date     : 11/17/2000                                                   */\n/*  Purpose  :Update the RD Modal (After Encoding the Current Frame)        */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nvoid updateRateControl(rateControl *rc, VideoEncData *video)\n{\n    Int  frame_bits;\n\n\n    /* rate contro\\l */\n    frame_bits = (Int)(rc->bitrate / rc->framerate);\n    rc->TMN_W += (rc->Rc - rc->TMN_TH);\n    rc->VBV_fullness += (rc->Rc - frame_bits); //rc->Rp);\n    //if(rc->VBV_fullness < 0) rc->VBV_fullness = -1;\n\n    rc->encoded_frames++;\n\n    /* frame dropping */\n    rc->skip_next_frame = 0;\n\n    if ((video->encParams->H263_Enabled && rc->Rc > video->encParams->maxFrameSize) || /*  For H263/short header mode, drop the frame if the actual frame size exceeds the bound */\n            (rc->VBV_fullness > rc->Bs / 2 && !rc->no_pre_skip)) /* skip the current frame */ /* rc->Bs */\n    {\n        rc->TMN_W -= (rc->Rc - rc->TMN_TH);\n        rc->VBV_fullness -= rc->Rc;\n        rc->skip_next_frame = -1;\n    }\n    else if 
((float)(rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95 &&\n             !rc->no_frame_skip) /* skip next frame */\n    {\n        rc->VBV_fullness -= frame_bits; //rc->Rp;\n        rc->skip_next_frame = 1;\n        /*  skip more than 1 frames  */\n        //while(rc->VBV_fullness > rc->Bs*0.475)\n        while ((rc->VBV_fullness - rc->VBV_fullness_offset) > (rc->Bs / 2 - rc->VBV_fullness_offset)*0.95)\n        {\n            rc->VBV_fullness -= frame_bits; //rc->Rp;\n            rc->skip_next_frame++;\n        }\n        /* END  */\n    }\n\n}\n\n/* ======================================================================== */\n/*  Function : updateRC_PostProc                                            */\n/*  Date     : 04/08/2002                                                   */\n/*  Purpose  : Remaing RC update stuff for frame skip and buffer underflow  */\n/*             check                                                        */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nvoid updateRC_PostProc(rateControl *rc, VideoEncData *video)\n{\n    MultiPass *pMP = video->pMP[video->currLayer];\n\n    if (rc->skip_next_frame == 1 && !rc->no_frame_skip) /* skip next frame */\n    {\n        pMP->counter_BTsrc += 10 * rc->skip_next_frame;\n\n    }\n    else if (rc->skip_next_frame == -1 && !rc->no_pre_skip) /* skip current frame */\n    {\n        pMP->counter_BTdst -= pMP->diff_counter;\n        pMP->counter_BTsrc += 10;\n\n        pMP->sum_mad -= pMP->mad;\n        pMP->aver_mad = (pMP->aver_mad * pMP->encoded_frames - pMP->mad) / (float)(pMP->encoded_frames - 1 + 0.0001);\n        pMP->sum_QP  -= pMP->QP;\n        pMP->encoded_frames --;\n    }\n    
/* some stuff in update VBV_fullness remains here */\n    //if(rc->VBV_fullness < -rc->Bs/2) /* rc->Bs */\n    if (rc->VBV_fullness < rc->low_bound)\n    {\n        rc->VBV_fullness = rc->low_bound; // -rc->Bs/2;\n        rc->TMN_W = rc->VBV_fullness - rc->low_bound;\n        pMP->counter_BTsrc = pMP->counter_BTdst + (Int)((float)(rc->Bs / 2 - rc->low_bound) / 2.0 / (pMP->target_bits_per_frame / 10));\n    }\n}\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/rate_control.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _RATE_CONTROL_H_\n#define _RATE_CONTROL_H_\n\n#include \"mp4def.h\"\n\ntypedef struct tagdataPointArray\n{\n    Int Qp;\n    Int Rp;\n    float Mp;   /* for MB-based RC, 3/14/01 */\n    struct tagdataPointArray *next;\n    struct tagdataPointArray *prev;\n} dataPointArray;\n\n\ntypedef struct\n{\n    Int alpha;  /* weight for I frame */\n    Int Rs;     /*bit rate for the sequence (or segment) e.g., 24000 bits/sec */\n    Int Rc;     /*bits used for the current frame. It is the bit count obtained after encoding. */\n    Int Rp;     /*bits to be removed from the buffer per picture. */\n    /*? is this the average one, or just the bits coded for the previous frame */\n    Int Rps;    /*bit to be removed from buffer per src frame */\n    float Ts;   /*number of seconds for the sequence  (or segment). e.g., 10 sec */\n    float Ep;\n    float Ec;   /*mean absolute difference for the current frame after motion compensation.*/\n    /*If the macroblock is intra coded, the original spatial pixel values are summed.*/\n    Int Qc;     /*quantization level used for the current frame. 
*/\n    Int Nr;     /*number of P frames remaining for encoding.*/\n    Int Rr; /*number of bits remaining for encoding this sequence (or segment).*/\n    Int Rr_Old;/* 12/24/00 */\n    Int T;      /*target bit to be used for the current frame.*/\n    Int S;      /*number of bits used for encoding the previous frame.*/\n    Int Hc; /*header and motion vector bits used in the current frame. It includes all the  information except to the residual information.*/\n    Int Hp; /*header and motion vector bits used in the previous frame. It includes all the     information except to the residual information.*/\n    Int Ql; /*quantization level used in the previous frame */\n    Int Bs; /*buffer size e.g., R/2 */\n    Int B;      /*current buffer level e.g., R/4 - start from the middle of the buffer */\n    float X1;\n    float X2;\n    float X11;\n    float M;            /*safe margin for the buffer */\n    float smTick;    /*ratio of src versus enc frame rate */\n    double remnant;  /*remainder frame of src/enc frame for fine frame skipping */\n    Int timeIncRes; /* vol->timeIncrementResolution */\n\n    dataPointArray   *end; /*quantization levels for the past (20) frames */\n\n    Int     frameNumber; /* ranging from 0 to 20 nodes*/\n    Int     w;\n    Int     Nr_Original;\n    Int     Nr_Old, Nr_Old2;\n    Int     skip_next_frame;\n    Int     Qdep;       /* smooth Q adjustment */\n    Int     fine_frame_skip;\n    Int     VBR_Enabled;\n    Int     no_frame_skip;\n    Int     no_pre_skip;\n\n    Int totalFrameNumber; /* total coded frames, for debugging!!*/\n\n    char    oFirstTime;\n\n    /* BX rate control */\n    Int     TMN_W;\n    Int     TMN_TH;\n    Int     VBV_fullness;\n    Int     max_BitVariance_num; /* the number of the maximum bit variance within the given buffer with the unit of 10% of bitrate/framerate*/\n    Int     encoded_frames; /* counter for all encoded frames */\n    float   framerate;\n    Int     bitrate;\n    Int     low_bound;             
 /* bound for underflow detection, usually low_bound=-Bs/2, but could be changed in H.263 mode */\n    Int     VBV_fullness_offset;    /* offset of VBV_fullness, usually is zero, but can be changed in H.263 mode*/\n    /* End BX */\n\n} rateControl;\n\n\n#endif /* _RATE_CONTROL_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/sad.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"oscl_base_macros.h\"  // for OSCL_UNUSED_ARG \n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n\n#include \"sad_inline.h\"\n\n#define Cached_lx 176\n\n#ifdef _SAD_STAT\nULong num_sad_MB = 0;\nULong num_sad_Blk = 0;\nULong num_sad_MB_call = 0;\nULong num_sad_Blk_call = 0;\n\n#define NUM_SAD_MB_CALL()       num_sad_MB_call++\n#define NUM_SAD_MB()            num_sad_MB++\n#define NUM_SAD_BLK_CALL()      num_sad_Blk_call++\n#define NUM_SAD_BLK()           num_sad_Blk++\n\n#else\n\n#define NUM_SAD_MB_CALL()\n#define NUM_SAD_MB()\n#define NUM_SAD_BLK_CALL()\n#define NUM_SAD_BLK()\n\n#endif\n\n\n/* consist of\nInt SAD_Macroblock_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)\nInt SAD_MB_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)\nInt SAD_MB_HTFM(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)\nInt SAD_Block_C(UChar *ref,UChar *blk,Int dmin,Int lx,void *extra_info)\nInt SAD_Blk_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)\nInt SAD_MB_PADDING(UChar *ref,UChar *cur,Int dmin,Int lx,void *extra_info)\nInt SAD_MB_PAD1(UChar *ref,UChar *cur,Int dmin,Int lx,Int *rep);\nInt SAD_MB_PADDING_HTFM_Collect(UChar *ref,UChar *cur,Int dmin,Int 
lx,void *extra_info)\nInt SAD_MB_PADDING_HTFM(UChar *ref,UChar *cur,Int dmin,Int lx,void *vptr)\n*/\n\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    Int SAD_MB_PAD1(UChar *ref, UChar *cur, Int dmin, Int lx, Int *rep);\n\n\n    /*==================================================================\n        Function:   SAD_Macroblock\n        Date:       09/07/2000\n        Purpose:    Compute SAD 16x16 between blk and ref.\n        To do:      Uniform subsampling will be inserted later!\n                    Hypothesis Testing Fast Matching to be used later!\n        Changes:\n    11/7/00:     implemented MMX\n    1/24/01:     implemented SSE\n    ==================================================================*/\n    /********** C ************/\n    Int SAD_Macroblock_C(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)\n    {\n        int32 x10;\n        Int dmin = (ULong)dmin_lx >> 16;\n        Int lx = dmin_lx & 0xFFFF;\n\n        OSCL_UNUSED_ARG(extra_info);\n\n        NUM_SAD_MB_CALL();\n\n        x10 = simd_sad_mb(ref, blk, dmin, lx);\n\n        return x10;\n    }\n\n#ifdef HTFM   /* HTFM with uniform subsampling implementation, 2/28/01 */\n    /*===============================================================\n        Function:   SAD_MB_HTFM_Collect and SAD_MB_HTFM\n        Date:       3/2/1\n        Purpose:    Compute the SAD on a 16x16 block using\n                    uniform subsampling and hypothesis testing fast matching\n                    for early dropout. 
SAD_MB_HP_HTFM_Collect is to collect\n                    the statistics to compute the thresholds to be used in\n                    SAD_MB_HP_HTFM.\n        Input/Output:\n        Changes:\n      ===============================================================*/\n\n    Int SAD_MB_HTFM_Collect(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)\n    {\n        Int i;\n        Int sad = 0;\n        UChar *p1;\n        Int lx4 = (dmin_lx << 2) & 0x3FFFC;\n        ULong cur_word;\n        Int saddata[16], tmp, tmp2;    /* used when collecting flag (global) is on */\n        Int difmad;\n        HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n        Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n        UInt *countbreak = &(htfm_stat->countbreak);\n        Int *offsetRef = htfm_stat->offsetRef;\n\n        NUM_SAD_MB_CALL();\n\n        blk -= 4;\n        for (i = 0; i < 16; i++)\n        {\n            p1 = ref + offsetRef[i];\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, 
tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            NUM_SAD_MB();\n\n            saddata[i] = sad;\n\n            if (i > 0)\n            {\n                if ((ULong)sad > ((ULong)dmin_lx >> 16))\n                {\n                    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                    (*countbreak)++;\n                    return sad;\n                }\n            }\n        }\n\n        difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n        (*abs_dif_mad_avg) += ((difmad > 0) ? 
difmad : -difmad);\n        (*countbreak)++;\n        return sad;\n    }\n\n    Int SAD_MB_HTFM(UChar *ref, UChar *blk, Int dmin_lx, void *extra_info)\n    {\n        Int sad = 0;\n        UChar *p1;\n\n        Int i;\n        Int tmp, tmp2;\n        Int lx4 = (dmin_lx << 2) & 0x3FFFC;\n        Int sadstar = 0, madstar;\n        Int *nrmlz_th = (Int*) extra_info;\n        Int *offsetRef = (Int*) extra_info + 32;\n        ULong cur_word;\n\n        madstar = (ULong)dmin_lx >> 20;\n\n        NUM_SAD_MB_CALL();\n\n        blk -= 4;\n        for (i = 0; i < 16; i++)\n        {\n            p1 = ref + offsetRef[i];\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = 
(cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            cur_word = *((ULong*)(blk += 4));\n            tmp = p1[12];\n            tmp2 = (cur_word >> 24) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[8];\n            tmp2 = (cur_word >> 16) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[4];\n            tmp2 = (cur_word >> 8) & 0xFF;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = p1[0];\n            p1 += lx4;\n            tmp2 = (cur_word & 0xFF);\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            NUM_SAD_MB();\n\n            sadstar += madstar;\n            if (((ULong)sad <= ((ULong)dmin_lx >> 16)) && (sad <= (sadstar - *nrmlz_th++)))\n                ;\n            else\n                return 65536;\n        }\n\n        return sad;\n    }\n#endif /* HTFM */\n\n#ifndef NO_INTER4V\n    /*==================================================================\n        Function:   SAD_Block\n        Date:       09/07/2000\n        Purpose:    Compute SAD 16x16 between blk and ref.\n        To do:      Uniform subsampling will be inserted later!\n                    Hypothesis Testing Fast Matching to be used later!\n        Changes:\n    11/7/00:     implemented MMX\n    1/24/01:     implemented SSE\n      ==================================================================*/\n    /********** C ************/\n    Int SAD_Block_C(UChar *ref, UChar *blk, Int dmin, Int lx, void *)\n    {\n        Int sad = 0;\n\n        Int i;\n        UChar *ii;\n        Int *kk;\n        Int tmp, tmp2, tmp3, mask = 0xFF;\n        Int width = (lx - 32);\n\n        NUM_SAD_BLK_CALL();\n\n        ii = ref;\n        kk  = (Int*)blk; /* assuming word-align for blk */\n        for (i = 0; i < 8; i++)\n        {\n            tmp3 = kk[1];\n            tmp = 
ii[7];\n            tmp2 = (UInt)tmp3 >> 24;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = ii[6];\n            tmp2 = (tmp3 >> 16) & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = ii[5];\n            tmp2 = (tmp3 >> 8) & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = ii[4];\n            tmp2 = tmp3 & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp3 = *kk;\n            kk += (width >> 2);\n            tmp = ii[3];\n            tmp2 = (UInt)tmp3 >> 24;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = ii[2];\n            tmp2 = (tmp3 >> 16) & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = ii[1];\n            tmp2 = (tmp3 >> 8) & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n            tmp = *ii;\n            ii += lx;\n            tmp2 = tmp3 & mask;\n            sad = SUB_SAD(sad, tmp, tmp2);\n\n            NUM_SAD_BLK();\n\n            if (sad > dmin)\n                return sad;\n        }\n\n        return sad;\n    }\n\n#endif /* NO_INTER4V */\n\n#ifdef __cplusplus\n}\n#endif\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/sad_halfpel.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/* contains\nInt HalfPel1_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh)\nInt HalfPel2_SAD_MB(UChar *ref,UChar *blk,Int dmin,Int width)\nInt HalfPel1_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width,Int ih,Int jh)\nInt HalfPel2_SAD_Blk(UChar *ref,UChar *blk,Int dmin,Int width)\n\nInt SAD_MB_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)\nInt SAD_MB_HP_HTFM_Collect(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)\nInt SAD_MB_HP_HTFM(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)\nInt SAD_Blk_HalfPel_C(UChar *ref,UChar *blk,Int dmin,Int width,Int rx,Int xh,Int yh,void *extra_info)\n*/\n\n//#include <stdlib.h> /* for RAND_MAX */\n#include \"oscl_base_macros.h\"  // for OSCL_UNUSED_ARG \n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n#include \"sad_halfpel_inline.h\"\n\n#ifdef _SAD_STAT\nULong num_sad_HP_MB = 0;\nULong num_sad_HP_Blk = 0;\nULong num_sad_HP_MB_call = 0;\nULong num_sad_HP_Blk_call = 0;\n#define NUM_SAD_HP_MB_CALL()    num_sad_HP_MB_call++\n#define NUM_SAD_HP_MB()         num_sad_HP_MB++\n#define NUM_SAD_HP_BLK_CALL()   num_sad_HP_Blk_call++\n#define 
NUM_SAD_HP_BLK()        num_sad_HP_Blk++\n#else\n#define NUM_SAD_HP_MB_CALL()\n#define NUM_SAD_HP_MB()\n#define NUM_SAD_HP_BLK_CALL()\n#define NUM_SAD_HP_BLK()\n#endif\n\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    /*==================================================================\n        Function:   HalfPel1_SAD_MB\n        Date:       03/27/2001\n        Purpose:    Compute SAD 16x16 between blk and ref in halfpel\n                    resolution,\n        Changes:\n      ==================================================================*/\n    /* One component is half-pel */\n    Int HalfPel1_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2;\n        Int temp;\n\n        OSCL_UNUSED_ARG(jh);\n\n        p1 = ref;\n        if (ih) p2 = ref + 1;\n        else p2 = ref + width;\n        kk  = blk;\n\n        for (i = 0; i < 16; i++)\n        {\n            for (j = 0; j < 16; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            if (sad > dmin)\n                return sad;\n            p1 += width;\n            p2 += width;\n        }\n        return sad;\n    }\n\n    /* Two components need half-pel */\n    Int HalfPel2_SAD_MB(UChar *ref, UChar *blk, Int dmin, Int width)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2, *p3, *p4;\n        Int temp;\n\n        p1 = ref;\n        p2 = ref + 1;\n        p3 = ref + width;\n        p4 = ref + width + 1;\n        kk  = blk;\n\n        for (i = 0; i < 16; i++)\n        {\n            for (j = 0; j < 16; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            if (sad > dmin)\n                return sad;\n\n            p1 += width;\n            p3 += width;\n            p2 += width;\n        
    p4 += width;\n        }\n        return sad;\n    }\n\n#ifndef NO_INTER4V\n    /*==================================================================\n        Function:   HalfPel1_SAD_Blk\n        Date:       03/27/2001\n        Purpose:    Compute SAD 8x8 between blk and ref in halfpel\n                    resolution.\n        Changes:\n      ==================================================================*/\n    /* One component needs half-pel */\n    Int HalfPel1_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width, Int ih, Int jh)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2;\n        Int temp;\n\n        OSCL_UNUSED_ARG(jh);\n\n        p1 = ref;\n        if (ih) p2 = ref + 1;\n        else p2 = ref + width;\n        kk  = blk;\n\n        for (i = 0; i < 8; i++)\n        {\n            for (j = 0; j < 8; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            if (sad > dmin)\n                return sad;\n            p1 += width;\n            p2 += width;\n            kk += 8;\n        }\n        return sad;\n    }\n    /* Two components need half-pel */\n    Int HalfPel2_SAD_Blk(UChar *ref, UChar *blk, Int dmin, Int width)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2, *p3, *p4;\n        Int temp;\n\n        p1 = ref;\n        p2 = ref + 1;\n        p3 = ref + width;\n        p4 = ref + width + 1;\n        kk  = blk;\n\n        for (i = 0; i < 8; i++)\n        {\n            for (j = 0; j < 8; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            if (sad > dmin)\n                return sad;\n\n            p1 += width;\n            p3 += width;\n            p2 += width;\n            p4 += width;\n            kk += 8;\n        }\n        return sad;\n    }\n#endif // NO_INTER4V\n    
/*===============================================================\n        Function:   SAD_MB_HalfPel\n        Date:       09/17/2000\n        Purpose:    Compute the SAD on the half-pel resolution\n        Input/Output:   hmem is assumed to be a pointer to the starting\n                    point of the search in the 33x33 matrix search region\n        Changes:\n    11/7/00:     implemented MMX\n      ===============================================================*/\n    /*==================================================================\n        Function:   SAD_MB_HalfPel_C\n        Date:       04/30/2001\n        Purpose:    Compute SAD 16x16 between blk and ref in halfpel\n                    resolution,\n        Changes:\n      ==================================================================*/\n    /* One component is half-pel */\n    Int SAD_MB_HalfPel_Cxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2, *p3, *p4;\n//  Int sumref=0;\n        Int temp;\n        Int rx = dmin_rx & 0xFFFF;\n\n        OSCL_UNUSED_ARG(extra_info);\n\n        NUM_SAD_HP_MB_CALL();\n\n        p1 = ref;\n        p2 = ref + 1;\n        p3 = ref + rx;\n        p4 = ref + rx + 1;\n        kk  = blk;\n\n        for (i = 0; i < 16; i++)\n        {\n            for (j = 0; j < 16; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            NUM_SAD_HP_MB();\n\n            if (sad > (Int)((ULong)dmin_rx >> 16))\n                return sad;\n\n            p1 += rx;\n            p3 += rx;\n            p2 += rx;\n            p4 += rx;\n        }\n        return sad;\n    }\n\n    Int SAD_MB_HalfPel_Cyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2;\n//  Int sumref=0;\n        Int temp;\n        Int rx = dmin_rx & 
0xFFFF;\n\n        OSCL_UNUSED_ARG(extra_info);\n\n        NUM_SAD_HP_MB_CALL();\n\n        p1 = ref;\n        p2 = ref + rx; /* either left/right or top/bottom pixel */\n        kk  = blk;\n\n        for (i = 0; i < 16; i++)\n        {\n            for (j = 0; j < 16; j++)\n            {\n\n                temp = ((p1[j] + p2[j] + 1) >> 1) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            NUM_SAD_HP_MB();\n\n            if (sad > (Int)((ULong)dmin_rx >> 16))\n                return sad;\n            p1 += rx;\n            p2 += rx;\n        }\n        return sad;\n    }\n\n    Int SAD_MB_HalfPel_Cxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1;\n//  Int sumref=0;\n        Int temp;\n        Int rx = dmin_rx & 0xFFFF;\n\n        OSCL_UNUSED_ARG(extra_info);\n\n        NUM_SAD_HP_MB_CALL();\n\n        p1 = ref;\n        kk  = blk;\n\n        for (i = 0; i < 16; i++)\n        {\n            for (j = 0; j < 16; j++)\n            {\n\n                temp = ((p1[j] + p1[j+1] + 1) >> 1) - *kk++;\n                sad += PV_ABS(temp);\n            }\n\n            NUM_SAD_HP_MB();\n\n            if (sad > (Int)((ULong)dmin_rx >> 16))\n                return sad;\n            p1 += rx;\n        }\n        return sad;\n    }\n\n#ifdef HTFM  /* HTFM with uniform subsampling implementation, 2/28/01 */\n\n//Checheck here\n    Int SAD_MB_HP_HTFM_Collectxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *p1, *p2;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int saddata[16];      /* used when collecting flag (global) is on */\n        Int difmad, tmp, tmp2;\n        HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n        Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n        UInt *countbreak = &(htfm_stat->countbreak);\n        Int *offsetRef = 
htfm_stat->offsetRef;\n        ULong cur_word;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n            p2 = p1 + rx;\n\n            j = 4;/* 4 lines */\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12] + p2[12];\n                tmp2 = p1[13] + p2[13];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 24) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8] + p2[8];\n                tmp2 = p1[9] + p2[9];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 16) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4] + p2[4];\n                tmp2 = p1[5] + p2[5];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 8) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp2 = p1[1] + p2[1];\n                tmp = p1[0] + p2[0];\n                p1 += refwx4;\n                p2 += refwx4;\n                tmp += tmp2;\n                tmp2 = (cur_word & 0xFF);\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n\n            saddata[i] = sad;\n\n            if (i > 0)\n            {\n                if (sad > (Int)((ULong)dmin_rx >> 16))\n                {\n                    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                    (*countbreak)++;\n                    return sad;\n                }\n            }\n        }\n        difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n        (*abs_dif_mad_avg) += ((difmad > 0) ? 
difmad : -difmad);\n        (*countbreak)++;\n\n        return sad;\n    }\n\n    Int SAD_MB_HP_HTFM_Collectyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *p1, *p2;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int saddata[16];      /* used when collecting flag (global) is on */\n        Int difmad, tmp, tmp2;\n        HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n        Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n        UInt *countbreak = &(htfm_stat->countbreak);\n        Int *offsetRef = htfm_stat->offsetRef;\n        ULong cur_word;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n            p2 = p1 + rx;\n            j = 4;\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12];\n                tmp2 = p2[12];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 24) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8];\n                tmp2 = p2[8];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 16) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4];\n                tmp2 = p2[4];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 8) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[0];\n                p1 += refwx4;\n                tmp2 = p2[0];\n                p2 += refwx4;\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word & 0xFF);\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n\n            saddata[i] = sad;\n\n       
     if (i > 0)\n            {\n                if (sad > (Int)((ULong)dmin_rx >> 16))\n                {\n                    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                    (*countbreak)++;\n                    return sad;\n                }\n            }\n        }\n        difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n        (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n        (*countbreak)++;\n\n        return sad;\n    }\n\n    Int SAD_MB_HP_HTFM_Collectxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *p1;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int saddata[16];      /* used when collecting flag (global) is on */\n        Int difmad, tmp, tmp2;\n        HTFM_Stat *htfm_stat = (HTFM_Stat*) extra_info;\n        Int *abs_dif_mad_avg = &(htfm_stat->abs_dif_mad_avg);\n        UInt *countbreak = &(htfm_stat->countbreak);\n        Int *offsetRef = htfm_stat->offsetRef;\n        ULong cur_word;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n\n            j = 4; /* 4 lines */\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12];\n                tmp2 = p1[13];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 24) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8];\n                tmp2 = p1[9];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 16) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4];\n                tmp2 = p1[5];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 8) & 
0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[0];\n                tmp2 = p1[1];\n                p1 += refwx4;\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word & 0xFF);\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n\n            saddata[i] = sad;\n\n            if (i > 0)\n            {\n                if (sad > (Int)((ULong)dmin_rx >> 16))\n                {\n                    difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n                    (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n                    (*countbreak)++;\n                    return sad;\n                }\n            }\n        }\n        difmad = saddata[0] - ((saddata[1] + 1) >> 1);\n        (*abs_dif_mad_avg) += ((difmad > 0) ? difmad : -difmad);\n        (*countbreak)++;\n\n        return sad;\n    }\n\n    Int SAD_MB_HP_HTFMxhyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0, tmp, tmp2;\n        UChar *p1, *p2;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int sadstar = 0, madstar;\n        Int *nrmlz_th = (Int*) extra_info;\n        Int *offsetRef = nrmlz_th + 32;\n        ULong cur_word;\n\n        madstar = (ULong)dmin_rx >> 20;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n            p2 = p1 + rx;\n\n            j = 4; /* 4 lines */\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12] + p2[12];\n                tmp2 = p1[13] + p2[13];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 24) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8] + p2[8];\n                tmp2 = 
p1[9] + p2[9];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 16) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4] + p2[4];\n                tmp2 = p1[5] + p2[5];\n                tmp += tmp2;\n                tmp2 = (cur_word >> 8) & 0xFF;\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n                tmp2 = p1[1] + p2[1];\n                tmp = p1[0] + p2[0];\n                p1 += refwx4;\n                p2 += refwx4;\n                tmp += tmp2;\n                tmp2 = (cur_word & 0xFF);\n                tmp += 2;\n                sad = INTERP2_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n\n            sadstar += madstar;\n            if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))\n            {\n                return 65536;\n            }\n        }\n\n        return sad;\n    }\n\n    Int SAD_MB_HP_HTFMyh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0, tmp, tmp2;\n        UChar *p1, *p2;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int sadstar = 0, madstar;\n        Int *nrmlz_th = (Int*) extra_info;\n        Int *offsetRef = nrmlz_th + 32;\n        ULong cur_word;\n\n        madstar = (ULong)dmin_rx >> 20;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n            p2 = p1 + rx;\n            j = 4;\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12];\n                tmp2 = p2[12];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 24) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8];\n                tmp2 = p2[8];\n    
            tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 16) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4];\n                tmp2 = p2[4];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 8) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[0];\n                p1 += refwx4;\n                tmp2 = p2[0];\n                p2 += refwx4;\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word & 0xFF);\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n            sadstar += madstar;\n            if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))\n            {\n                return 65536;\n            }\n        }\n\n        return sad;\n    }\n\n    Int SAD_MB_HP_HTFMxh(UChar *ref, UChar *blk, Int dmin_rx, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0, tmp, tmp2;\n        UChar *p1;\n        Int rx = dmin_rx & 0xFFFF;\n        Int refwx4 = rx << 2;\n        Int sadstar = 0, madstar;\n        Int *nrmlz_th = (Int*) extra_info;\n        Int *offsetRef = nrmlz_th + 32;\n        ULong cur_word;\n\n        madstar = (ULong)dmin_rx >> 20;\n\n        NUM_SAD_HP_MB_CALL();\n\n        blk -= 4;\n\n        for (i = 0; i < 16; i++) /* 16 stages */\n        {\n            p1 = ref + offsetRef[i];\n\n            j = 4;/* 4 lines */\n            do\n            {\n                cur_word = *((ULong*)(blk += 4));\n                tmp = p1[12];\n                tmp2 = p1[13];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 24) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[8];\n                tmp2 = p1[9];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word 
>> 16) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[4];\n                tmp2 = p1[5];\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word >> 8) & 0xFF;\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n                tmp = p1[0];\n                tmp2 = p1[1];\n                p1 += refwx4;\n                tmp++;\n                tmp2 += tmp;\n                tmp = (cur_word & 0xFF);\n                sad = INTERP1_SUB_SAD(sad, tmp, tmp2);;\n            }\n            while (--j);\n\n            NUM_SAD_HP_MB();\n\n            sadstar += madstar;\n            if (sad > sadstar - nrmlz_th[i] || sad > (Int)((ULong)dmin_rx >> 16))\n            {\n                return 65536;\n            }\n        }\n\n        return sad;\n    }\n\n#endif /* HTFM */\n\n#ifndef NO_INTER4V\n    /*==================================================================\n        Function:   SAD_Blk_HalfPel_C\n        Date:       04/30/2001\n        Purpose:    Compute SAD 16x16 between blk and ref in halfpel\n                    resolution,\n        Changes:\n      ==================================================================*/\n    /* One component is half-pel */\n    Int SAD_Blk_HalfPel_C(UChar *ref, UChar *blk, Int dmin, Int width, Int rx, Int xh, Int yh, void *extra_info)\n    {\n        Int i, j;\n        Int sad = 0;\n        UChar *kk, *p1, *p2, *p3, *p4;\n        Int temp;\n\n        OSCL_UNUSED_ARG(extra_info);\n\n        NUM_SAD_HP_BLK_CALL();\n\n        if (xh && yh)\n        {\n            p1 = ref;\n            p2 = ref + xh;\n            p3 = ref + yh * rx;\n            p4 = ref + yh * rx + xh;\n            kk  = blk;\n\n            for (i = 0; i < 8; i++)\n            {\n                for (j = 0; j < 8; j++)\n                {\n\n                    temp = ((p1[j] + p2[j] + p3[j] + p4[j] + 2) >> 2) - kk[j];\n                    sad += PV_ABS(temp);\n                }\n\n            
    NUM_SAD_HP_BLK();\n\n                if (sad > dmin)\n                    return sad;\n\n                p1 += rx;\n                p3 += rx;\n                p2 += rx;\n                p4 += rx;\n                kk += width;\n            }\n            return sad;\n        }\n        else\n        {\n            p1 = ref;\n            p2 = ref + xh + yh * rx; /* either left/right or top/bottom pixel */\n\n            kk  = blk;\n\n            for (i = 0; i < 8; i++)\n            {\n                for (j = 0; j < 8; j++)\n                {\n\n                    temp = ((p1[j] + p2[j] + 1) >> 1) - kk[j];\n                    sad += PV_ABS(temp);\n                }\n\n                NUM_SAD_HP_BLK();\n\n                if (sad > dmin)\n                    return sad;\n                p1 += rx;\n                p2 += rx;\n                kk += width;\n            }\n            return sad;\n        }\n    }\n#endif /* NO_INTER4V */\n\n#ifdef __cplusplus\n}\n#endif\n\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/sad_halfpel_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/*  Filename: sad_halfpel_inline.h                                                      */\n/*  Description: Implementation for in-line functions used in dct.cpp           */\n/*  Modified:                                                                   */\n/*********************************************************************************/\n\n#ifndef _SAD_HALFPEL_INLINE_H_\n#define _SAD_HALFPEL_INLINE_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* ARM GNU COMPILER  */\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = (tmp2 >> 1) - tmp;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = (tmp >> 2) - tmp2;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, 
tmp, tmp2, asr #1 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, tmp2, tmp, asr #2 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER  */\n\n\n    __inline int32 INTERP1_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        register int32 out;\n        register int32 temp1;\n        register int32 ss = sad;\n        register int32 tt = tmp;\n        register int32 uu = tmp2;\n\n        asm volatile(\"rsbs  %1, %3, %4, asr #1\\n\\t\"\n                     \"rsbmi %1, %1, #0\\n\\t\"\n                     \"add  %0, %2, %1\"\n             : \"=&r\"(out),\n                     \"=&r\"(temp1)\n                             : \"r\"(ss),\n                             \"r\"(tt),\n                             \"r\"(uu));\n        return out;\n    }\n\n\n    __inline int32 INTERP2_SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n{\n        register int32 out;\n        register int32 temp1;\n        register int32 ss = sad;\n        register int32 tt = tmp;\n        register int32 uu = tmp2;\n\n        asm volatile(\"rsbs      %1, %4, %3, asr #2\\n\\t\"\n                     \"rsbmi %1, %1, #0\\n\\t\"\n                     \"add  %0, %2, %1\"\n             : \"=&r\"(out),\n                     \"=&r\"(temp1)\n                             : \"r\"(ss),\n                             \"r\"(tt),\n                             \"r\"(uu));\n        return out;\n    }\n\n\n#endif // Diff OS\n\n\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif //_SAD_HALFPEL_INLINE_H_\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/sad_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/*  Filename: sad_inline.h                                                      */\n/*  Description: Implementation for in-line functions used in dct.cpp           */\n/*  Modified:                                                                   */\n/*********************************************************************************/\n#ifndef _SAD_INLINE_H_\n#define _SAD_INLINE_H_\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n#if !defined(PV_ARM_GCC_V5) && !defined(PV_ARM_GCC_V4) /* ARM GNU COMPILER  */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        tmp = tmp - tmp2;\n        if (tmp > 0) sad += tmp;\n        else sad -= tmp;\n\n        return sad;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        x7 = src2 ^ src1;       /* check odd/even combination */\n        if ((uint32)src2 >= (uint32)src1)\n        {\n            src1 = src2 - src1;     /* subs */\n        }\n        else\n        {\n            src1 = src1 - src2;\n        }\n        x7 = x7 ^ src1;     /* only odd bytes need to add carry */\n        x7 = 
mask & ((uint32)x7 >> 1);\n        x7 = (x7 << 8) - x7;\n        src1 = src1 + (x7 >> 7); /* add 0xFF to the negative byte, add back carry */\n        src1 = src1 ^(x7 >> 7);   /* take absolute value of negative byte */\n\n        return src1;\n    }\n\n#define NUMBER 3\n#define SHIFT 24\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 1\n#undef SHIFT\n#define SHIFT 8\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)\n    {\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. */\n\n        x8 = (uint32)ref & 0x3;\n        if (x8 == 3)\n            goto SadMBOffset3;\n        if (x8 == 2)\n            goto SadMBOffset2;\n        if (x8 == 1)\n            goto SadMBOffset1;\n\n//  x5 = (x4<<8)-x4; /* x5 = x4*255; */\n        x4 = x5 = 0;\n\n        x6 = 0xFFFF00FF;\n\n        ref -= lx;\n        blk -= 16;\n\n        x8 = 16;\n\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x10 = *((uint32*)(ref += lx));\n        x11 = *((uint32*)(ref + 4));\n        x12 = *((uint32*)(blk += 16));\n        x14 = *((uint32*)(blk + 4));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10; /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****** process 8 pixels ******/\n        x10 = *((uint32*)(ref + 8));\n        x11 = *((uint32*)(ref + 12));\n        x12 = *((uint32*)(blk + 8));\n        x14 = *((uint32*)(blk + 12));\n\n        /* process x11 & x14 */\n    
    x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add with lower half word */\n\n        if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */\n        {\n            if (--x8)\n            {\n                goto LOOP_SAD0;\n            }\n\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin);\n\n    }\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        __asm\n        {\n            rsbs    tmp, tmp, tmp2 ;\n            rsbmi   tmp, tmp, #0 ;\n            add     sad, sad, tmp ;\n        }\n\n        return sad;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        __asm\n        {\n            EOR     x7, src2, src1;     /* check odd/even combination */\n            SUBS    src1, src2, src1;\n            EOR     x7, x7, src1;\n            AND     x7, mask, x7, lsr #1;\n            ORRCC   x7, x7, #0x80000000;\n            RSB     x7, x7, x7, lsl #8;\n            ADD     src1, src1, x7, asr #7;   /* add 0xFF to the negative byte, add back carry */\n            EOR     src1, src1, x7, asr #7;   /* take absolute value of negative 
byte */\n        }\n\n        return src1;\n    }\n\n    __inline int32 sad_4pixelN(int32 src1, int32 src2, int32 mask)\n    {\n        int32 x7;\n\n        __asm\n        {\n            EOR      x7, src2, src1;        /* check odd/even combination */\n            ADDS     src1, src2, src1;\n            EOR      x7, x7, src1;      /* only odd bytes need to add carry */\n            ANDS     x7, mask, x7, rrx;\n            RSB      x7, x7, x7, lsl #8;\n            SUB      src1, src1, x7, asr #7;  /* add 0xFF to the negative byte, add back carry */\n            EOR      src1, src1, x7, asr #7; /* take absolute value of negative byte */\n        }\n\n        return src1;\n    }\n\n#define sum_accumulate  __asm{      SBC      x5, x5, x10;  /* accumulate low bytes */ \\\n        BIC      x10, x6, x10;   /* x10 & 0xFF00FF00 */ \\\n        ADD      x4, x4, x10,lsr #8;   /* accumulate high bytes */ \\\n        SBC      x5, x5, x11;    /* accumulate low bytes */ \\\n        BIC      x11, x6, x11;   /* x11 & 0xFF00FF00 */ \\\n        ADD      x4, x4, x11,lsr #8; } /* accumulate high bytes */\n\n\n#define NUMBER 3\n#define SHIFT 24\n#define INC_X8 0x08000001\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#undef INC_X8\n#define INC_X8 0x10000001\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 1\n#undef SHIFT\n#define SHIFT 8\n#undef INC_X8\n#define INC_X8 0x08000001\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)\n    {\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. 
*/\n        x4 = x5 = 0;\n\n        __asm\n        {\n            MOVS    x8, ref, lsl #31 ;\n            BHI     SadMBOffset3;\n            BCS     SadMBOffset2;\n            BMI     SadMBOffset1;\n\n            MVN     x6, #0xFF00;\n        }\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x11 = *((int32*)(ref + 12));\n        x10 = *((int32*)(ref + 8));\n        x14 = *((int32*)(blk + 12));\n        x12 = *((int32*)(blk + 8));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        __asm\n        {\n            /****** process 8 pixels ******/\n            LDR     x11, [ref, #4];\n            LDR     x10, [ref], lx ;\n            LDR     x14, [blk, #4];\n            LDR     x12, [blk], #16 ;\n        }\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add with lower half word */\n\n        __asm\n        {\n            /****************/\n            RSBS    x11, dmin, x10, lsr 
#16;\n            ADDLSS  x8, x8, #0x10000001;\n            BLS     LOOP_SAD0;\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin, x8);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin, x8);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin, x8);\n    }\n\n\n#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER  */\n\n    __inline int32 SUB_SAD(int32 sad, int32 tmp, int32 tmp2)\n    {\n        register int32 out;\n        register int32 temp1;\n        register int32 ss = sad;\n        register int32 tt = tmp;\n        register int32 uu = tmp2;\n\n        asm volatile(\"rsbs  %1, %4, %3\\n\\t\"\n                     \"rsbmi %1, %1, #0\\n\\t\"\n                     \"add   %0, %2, %1\"\n             : \"=&r\"(out),\n                     \"=&r\"(temp1)\n                             : \"r\"(ss),\n                             \"r\"(tt),\n                             \"r\"(uu));\n        return out;\n    }\n\n    __inline int32 sad_4pixel(int32 src1, int32 src2, int32 mask)\n{\n        register int32 out;\n        register int32 temp1;\n        register int32 s1 = src1;\n        register int32 s2 = src2;\n        register int32 mm = mask;\n\n        asm volatile(\"eor   %0, %3, %2\\n\\t\"\n                     \"subs  %1, %3, %2\\n\\t\"\n                     \"eor   %0, %0, %1\\n\\t\"\n                     \"and   %0, %4, %0, lsr #1\\n\\t\"\n                     \"orrcc %0, %0, #0x80000000\\n\\t\"\n                     \"rsb   %0, %0, %0, lsl #8\\n\\t\"\n                     \"add   %1, %1, %0, asr #7\\n\\t\"\n                     \"eor   %1, %1, %0, asr #7\"\n             : \"=&r\"(out),\n                     \"=&r\"(temp1)\n                             : \"r\"(s1),\n                             \"r\"(s2),\n                             \"r\"(mm));\n\n        return temp1;\n    }\n\n    __inline int32 sad_4pixelN(int32 src1, int32 src2, 
int32 mask)\n{\n        register int32 out;\n        register int32 temp1;\n        register int32 s1 = src1;\n        register int32 s2 = src2;\n        register int32 mm = mask;\n\n        asm volatile(\"eor    %1, %3, %2\\n\\t\"\n                     \"adds   %0, %3, %2\\n\\t\"\n                     \"eor    %1, %1, %0\\n\\t\"\n                     \"ands   %1, %4, %1,rrx\\n\\t\"\n                     \"rsb    %1, %1, %1, lsl #8\\n\\t\"\n                     \"sub    %0, %0, %1, asr #7\\n\\t\"\n                     \"eor    %0, %0, %1, asr #7\"\n             : \"=&r\"(out),\n                     \"=&r\"(temp1)\n                             : \"r\"(s1),\n                             \"r\"(s2),\n                             \"r\"(mm));\n\n        return (out);\n    }\n\n#define sum_accumulate asm volatile(\"sbc  %0, %0, %1\\n\\t\" \\\n                                \"bic  %1, %4, %1\\n\\t\" \\\n                                \"add  %2, %2, %1, lsr #8\\n\\t\" \\\n                                \"sbc  %0, %0, %3\\n\\t\" \\\n                                \"bic  %3, %4, %3\\n\\t\" \\\n                                \"add  %2, %2, %3, lsr #8\" \\\n                                :\"+r\"(x5), \"+r\"(x10), \"+r\"(x4), \"+r\"(x11) \\\n                                :\"r\"(x6));\n\n#define NUMBER 3\n#define SHIFT 24\n#define INC_X8 0x08000001\n\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 2\n#undef SHIFT\n#define SHIFT 16\n#undef INC_X8\n#define INC_X8 0x10000001\n#include \"sad_mb_offset.h\"\n\n#undef NUMBER\n#define NUMBER 1\n#undef SHIFT\n#define SHIFT 8\n#undef INC_X8\n#define INC_X8 0x08000001\n#include \"sad_mb_offset.h\"\n\n\n    __inline int32 simd_sad_mb(UChar *ref, UChar *blk, Int dmin, Int lx)\n{\n        int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n        x9 = 0x80808080; /* const. 
*/\n        x4 = x5 = 0;\n\n        x8 = (uint32)ref & 0x3;\n        if (x8 == 3)\n            goto SadMBOffset3;\n        if (x8 == 2)\n            goto SadMBOffset2;\n        if (x8 == 1)\n            goto SadMBOffset1;\n\nasm volatile(\"mvn %0, #0xFF00\": \"=r\"(x6));\n\nLOOP_SAD0:\n        /****** process 8 pixels ******/\n        x11 = *((int32*)(ref + 12));\n        x10 = *((int32*)(ref + 8));\n        x14 = *((int32*)(blk + 12));\n        x12 = *((int32*)(blk + 8));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        asm volatile(\"ldr  %0, [%4, #4]\\n\\t\"\n                     \"ldr  %1, [%4], %6\\n\\t\"\n                     \"ldr  %2, [%5, #4]\\n\\t\"\n                     \"ldr  %3, [%5], #16\"\n             : \"=r\"(x11), \"=r\"(x10), \"=r\"(x14), \"=r\"(x12), \"+r\"(ref), \"+r\"(blk)\n                             : \"r\"(lx));\n\n        /* process x11 & x14 */\n        x11 = sad_4pixel(x11, x14, x9);\n\n        /* process x12 & x10 */\n        x10 = sad_4pixel(x10, x12, x9);\n\n        x5 = x5 + x10;  /* accumulate low bytes */\n        x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x10 >> 8); /* accumulate high bytes */\n        x5 = x5 + x11;  /* accumulate low bytes */\n        x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n        x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n        /****************/\n        x10 = x5 - (x4 << 8); /* extract low bytes */\n        x10 = x10 + x4;     /* add with high bytes */\n        x10 = x10 + (x10 << 16); /* add 
with lower half word */\n\n        if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */\n        {\n            if (--x8)\n            {\n                goto LOOP_SAD0;\n            }\n\n        }\n\n        return ((uint32)x10 >> 16);\n\nSadMBOffset3:\n\n        return sad_mb_offset3(ref, blk, lx, dmin);\n\nSadMBOffset2:\n\n        return sad_mb_offset2(ref, blk, lx, dmin);\n\nSadMBOffset1:\n\n        return sad_mb_offset1(ref, blk, lx, dmin);\n    }\n\n#endif // OS\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif // _SAD_INLINE_H_\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/sad_mb_offset.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/*********************************************************************************/\n/*  Filename: sad_mb_offset.h                                                       */\n/*  Description: Implementation for in-line functions used in dct.cpp           */\n/*  Modified:                                                                   */\n/*********************************************************************************/\n\n#if !defined(PV_ARM_GCC_V4) && !defined(PV_ARM_GCC_V5) /* ARM GNU COMPILER  */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)\n#elif (NUMBER==2)\n__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)\n#endif\n{\n    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n    //  x5 = (x4<<8) - x4;\n    x4 = x5 = 0;\n    x6 = 0xFFFF00FF;\n    x9 = 0x80808080; /* const. 
*/\n    ref -= NUMBER; /* bic ref, ref, #3 */\n    ref -= lx;\n    blk -= 16;\n    x8 = 16;\n\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref += lx)); /* D C B A */\n    x11 = *((uint32*)(ref + 4));    /* H G F E */\n    x12 = *((uint32*)(ref + 8));    /* L K J I */\n\n    x10 = ((uint32)x10 >> SHIFT); /* 0 0 0 D */\n    x10 = x10 | (x11 << (32 - SHIFT));        /* G F E D */\n    x11 = ((uint32)x11 >> SHIFT); /* 0 0 0 H */\n    x11 = x11 | (x12 << (32 - SHIFT));        /* K J I H */\n\n    x12 = *((uint32*)(blk += 16));\n    x14 = *((uint32*)(blk + 4));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    x5 = x5 + x10; /* accumulate low bytes */\n    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high bytes */\n    x5 = x5 + x11;  /* accumulate low bytes */\n    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref + 8)); /* D C B A */\n    x11 = *((uint32*)(ref + 12));   /* H G F E */\n    x12 = *((uint32*)(ref + 16));   /* L K J I */\n\n    x10 = ((uint32)x10 >> SHIFT); /* mvn x10, x10, lsr #24  = 0xFF 0xFF 0xFF ~D */\n    x10 = x10 | (x11 << (32 - SHIFT));        /* bic x10, x10, x11, lsl #8 = ~G ~F ~E ~D */\n    x11 = ((uint32)x11 >> SHIFT); /* 0xFF 0xFF 0xFF ~H */\n    x11 = x11 | (x12 << (32 - SHIFT));        /* ~K ~J ~I ~H */\n\n    x12 = *((uint32*)(blk + 8));\n    x14 = *((uint32*)(blk + 12));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    x5 = x5 + x10; /* accumulate low bytes */\n    x10 = x10 & (x6 << 8); /* x10 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x10 >> 8);  /* accumulate high 
bytes */\n    x5 = x5 + x11;  /* accumulate low bytes */\n    x11 = x11 & (x6 << 8); /* x11 & 0xFF00FF00 */\n    x4 = x4 + ((uint32)x11 >> 8);  /* accumulate high bytes */\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */\n    {\n        if (--x8)\n        {\n#if (NUMBER==3)\n            goto         LOOP_SAD3;\n#elif (NUMBER==2)\n            goto         LOOP_SAD2;\n#elif (NUMBER==1)\n            goto         LOOP_SAD1;\n#endif\n        }\n\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)\n#elif (NUMBER==2)\n__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin, int32 x8)\n#endif\n{\n    int32 x4, x5, x6, x9, x10, x11, x12, x14;\n\n    x9 = 0x80808080; /* const. 
*/\n    x4 = x5 = 0;\n\n    __asm{\n        MVN      x6, #0xff0000;\n        BIC      ref, ref, #3;\n\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n    }\n    /****** process 8 pixels ******/\n    x11 = *((int32*)(ref + 12));\n    x12 = *((int32*)(ref + 16));\n    x10 = *((int32*)(ref + 8));\n    x14 = *((int32*)(blk + 12));\n\n    __asm{\n        MVN      x10, x10, lsr #SHIFT;\n        BIC      x10, x10, x11, lsl #(32-SHIFT);\n        MVN      x11, x11, lsr #SHIFT;\n        BIC      x11, x11, x12, lsl #(32-SHIFT);\n\n        LDR      x12, [blk, #8];\n    }\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    __asm{\n        /****** process 8 pixels ******/\n        LDR      x11, [ref, #4];\n        LDR      x12, [ref, #8];\n        LDR  x10, [ref], lx ;\n        LDR  x14, [blk, #4];\n\n        MVN      x10, x10, lsr #SHIFT;\n        BIC      x10, x10, x11, lsl #(32-SHIFT);\n        MVN      x11, x11, lsr #SHIFT;\n        BIC      x11, x11, x12, lsl #(32-SHIFT);\n\n        LDR      x12, [blk], #16;\n    }\n\n    /* process x11 & x14 */\n    x11 = sad_4pixelN(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixelN(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    __asm{\n        RSBS     x11, dmin, x10, lsr #16\n        ADDLSS   x8, x8, #INC_X8\n#if (NUMBER==3)\n        BLS      LOOP_SAD3;\n#elif (NUMBER==2)\nBLS      LOOP_SAD2;\n#elif (NUMBER==1)\nBLS      LOOP_SAD1;\n#endif\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#elif ( defined(PV_ARM_GCC_V5) || defined(PV_ARM_GCC_V4) ) /* ARM GNU COMPILER  */\n\n#if (NUMBER==3)\n__inline int32 sad_mb_offset3(UChar *ref, UChar *blk, Int lx, Int dmin)\n#elif 
(NUMBER==2)\n__inline int32 sad_mb_offset2(UChar *ref, UChar *blk, Int lx, Int dmin)\n#elif (NUMBER==1)\n__inline int32 sad_mb_offset1(UChar *ref, UChar *blk, Int lx, Int dmin)\n#endif\n{\n    int32 x4, x5, x6, x8, x9, x10, x11, x12, x14;\n\n    //  x5 = (x4<<8) - x4;\n    x4 = x5 = 0;\n    x6 = 0xFFFF00FF;\n    x9 = 0x80808080; /* const. */\n    ref -= NUMBER; /* bic ref, ref, #3 */\n    ref -= lx;\n    x8 = 16;\n\n#if (NUMBER==3)\nLOOP_SAD3:\n#elif (NUMBER==2)\nLOOP_SAD2:\n#elif (NUMBER==1)\nLOOP_SAD1:\n#endif\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref += lx)); /* D C B A */\n    x11 = *((uint32*)(ref + 4));    /* H G F E */\n    x12 = *((uint32*)(ref + 8));    /* L K J I */\n\n    int32 shift = SHIFT;\n    int32 shift2 = 32 - SHIFT;\n    asm volatile(\"ldr  %3, [%4, #4]\\n\\t\"\n                 \"mvn  %0, %0, lsr %5\\n\\t\"\n                 \"bic  %0, %0, %1, lsl %6\\n\\t\"\n                 \"mvn  %1, %1, lsr %5\\n\\t\"\n                 \"bic  %1, %1, %2, lsl %6\\n\\t\"\n                 \"ldr  %2, [%4, #8]\"\n             : \"+r\"(x10), \"+r\"(x11), \"+r\"(x12), \"=r\"(x14)\n                         : \"r\"(blk), \"r\"(shift), \"r\"(shift2));\n\n    /* process x11 & x14 */\n    x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****** process 8 pixels ******/\n    x10 = *((uint32*)(ref + 8)); /* D C B A */\n    x11 = *((uint32*)(ref + 12));   /* H G F E */\n    x12 = *((uint32*)(ref + 16));   /* L K J I */\n\n    asm volatile(\"ldr  %3, [%4, #4]\\n\\t\"\n                 \"mvn  %0, %0, lsr %5\\n\\t\"\n                 \"bic  %0, %0, %1, lsl %6\\n\\t\"\n                 \"mvn  %1, %1, lsr %5\\n\\t\"\n                 \"bic  %1, %1, %2, lsl %6\\n\\t\"\n                 \"ldr  %2, [%4, #8]\"\n             : \"+r\"(x10), \"+r\"(x11), \"+r\"(x12), \"=r\"(x14)\n                         : \"r\"(blk), \"r\"(shift), \"r\"(shift2));\n\n    /* process x11 & x14 */\n   
 x11 = sad_4pixel(x11, x14, x9);\n\n    /* process x12 & x10 */\n    x10 = sad_4pixel(x10, x12, x9);\n\n    sum_accumulate;\n\n    /****************/\n    x10 = x5 - (x4 << 8); /* extract low bytes */\n    x10 = x10 + x4;     /* add with high bytes */\n    x10 = x10 + (x10 << 16); /* add with lower half word */\n\n    if (((uint32)x10 >> 16) <= (uint32)dmin) /* compare with dmin */\n    {\n        if (--x8)\n        {\n#if (NUMBER==3)\n            goto         LOOP_SAD3;\n#elif (NUMBER==2)\ngoto         LOOP_SAD2;\n#elif (NUMBER==1)\ngoto         LOOP_SAD1;\n#endif\n        }\n\n    }\n\n    return ((uint32)x10 >> 16);\n}\n\n#endif\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/vlc_enc_tab.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/******************************************************************************\n *\n * This software module was originally developed by\n *\n * Robert Danielsen (Telenor / ACTS-MoMuSys).\n *\n * and edited by\n *\n * Minhua Zhou (HHI / ACTS-MoMuSys).\n * Luis Ducla-Soares (IST / ACTS-MoMuSys).\n *\n * in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n * This software module is an implementation of a part of one or more MPEG-4\n * Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n * 14496-2) standard.\n *\n * ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n * license to this software module or modifications thereof for use in hardware\n * or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n * 14496-2) standard.\n *\n * Those intending to use this software module in hardware or software products\n * are advised that its use may infringe existing patents. The original\n * developer of this software module and his/her company, the subsequent\n * editors and their companies, and ISO/IEC have no liability for use of this\n * software module or modifications thereof in an implementation. 
Copyright is\n * not released for non MPEG-4 Video (ISO/IEC 14496-2) standard conforming\n * products.\n *\n * ACTS-MoMuSys partners retain full right to use the code for his/her own\n * purpose, assign or donate the code to a third party and to inhibit third\n * parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) standard\n * conforming products. This copyright notice must be included in all copies or\n * derivative works.\n *\n * Copyright (c) 1997\n *\n *****************************************************************************/\n\n\n/***********************************************************HeaderBegin*******\n *\n * File:    vlc.h\n *\n * Author:  Robert Danielsen\n * Created: 07.06.96\n *\n * Description: vlc tables for encoder\n *\n * Notes:   Idea taken from MPEG-2 software simulation group\n *\n * Modified:\n *  28.10.96 Robert Danielsen: Added tables for Intra luminance\n *          coefficients\n *      01.05.97 Luis Ducla-Soares: added VM7.0 Reversible VLC tables (RVLC).\n *      13.05.97 Minhua Zhou: added cbpy_tab3,cbpy_tab2\n *\n ***********************************************************HeaderEnd*********/\n\n/************************    INCLUDE FILES    ********************************/\n\n#ifndef _VLC_ENC_TAB_H_\n#define _VLC_ENC_TAB_H_\n\n\n#include \"mp4def.h\"\n/* type definitions for variable length code table entries */\n\n\n\nstatic const Int intra_max_level[2][64] =\n{\n    {27, 10,  5,  4,  3,  3,  3,  3,\n        2,  2,  1,  1,  1,  1,  1,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n    },\n\n    {8,  3,  2,  2,  2,  2,  2,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  
0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0\n    }\n};\n\n\nstatic const Int inter_max_level[2][64] =\n{\n    {12,  6,  4,  3,  3,  3,  3,  2,\n        2,  2,  2,  1,  1,  1,  1,  1,\n        1,  1,  1,  1,  1,  1,  1,  1,\n        1,  1,  1,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0,\n        0,  0,  0,  0,  0,  0,  0,  0},\n\n    {3,  2,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  1,  1,  1,  1,  1,  1,  1,\n     1,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0,\n     0,  0,  0,  0,  0,  0,  0,  0}\n};\n\n\nstatic const Int intra_max_run0[28] = { 999, 14,  9,  7,  3,  2,  1,\n                                        1,  1,  1,  1,  0,  0,  0,\n                                        0,  0,  0,  0,  0,  0,  0,\n                                        0,  0,  0,  0,  0,  0,  0\n                                      };\n\n\nstatic const Int intra_max_run1[9] = { 999, 20,  6,\n                                       1,  0,  0,\n                                       0,  0,  0\n                                     };\n\nstatic const Int inter_max_run0[13] = { 999,\n                                        26, 10,  6,  2,  1,  1,\n                                        0,  0,  0,  0,  0,  0\n                                      };\n\n\nstatic const Int inter_max_run1[4] = { 999, 40,  1,  0 };\n\n\n\n/* DC prediction sizes */\n\nstatic const VLCtable DCtab_lum[13] =\n{\n    {3, 3}, {3, 2}, {2, 2}, {2, 3}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 7},\n    {1, 8}, {1, 9}, {1, 10}, {1, 11}\n};\n\nstatic const VLCtable DCtab_chrom[13] =\n{\n    {3, 2}, {2, 2}, {1, 2}, {1, 3}, {1, 4}, {1, 5}, {1, 6}, {1, 7}, {1, 8},\n    {1, 9}, {1, 10}, {1, 11}, {1, 12}\n};\n\n/* Motion vectors */\n\nstatic const VLCtable mvtab[33] =\n{\n    {1, 1}, {1, 2}, {1, 3}, {1, 
4}, {3, 6}, {5, 7}, {4, 7}, {3, 7},\n    {11, 9}, {10, 9}, {9, 9}, {17, 10}, {16, 10}, {15, 10}, {14, 10}, {13, 10},\n    {12, 10}, {11, 10}, {10, 10}, {9, 10}, {8, 10}, {7, 10}, {6, 10}, {5, 10},\n    {4, 10}, {7, 11}, {6, 11}, {5, 11}, {4, 11}, {3, 11}, {2, 11}, {3, 12},\n    {2, 12}\n};\n\n\n/* MCBPC Indexing by cbpc in first two bits, mode in last two.\n CBPC as in table 4/H.263, MB type (mode): 3 = 01, 4 = 10.\n Example: cbpc = 01 and mode = 4 gives index = 0110 = 6. */\n\nstatic const VLCtable mcbpc_intra_tab[15] =\n{\n    {0x01, 9}, {0x01, 1}, {0x01, 4}, {0x00, 0},\n    {0x00, 0}, {0x01, 3}, {0x01, 6}, {0x00, 0},\n    {0x00, 0}, {0x02, 3}, {0x02, 6}, {0x00, 0},\n    {0x00, 0}, {0x03, 3}, {0x03, 6}\n};\n\n\n/* MCBPC inter.\n   Addressing: 5 bit ccmmm (cc = CBPC, mmm = mode (1-4 binary)) */\n\nstatic const VLCtable mcbpc_inter_tab[29] =\n{\n    {1, 1}, {3, 3}, {2, 3}, {3, 5}, {4, 6}, {1, 9}, {0, 0}, {0, 0},\n    {3, 4}, {7, 7}, {5, 7}, {4, 8}, {4, 9}, {0, 0}, {0, 0}, {0, 0},\n    {2, 4}, {6, 7}, {4, 7}, {3, 8}, {3, 9}, {0, 0}, {0, 0}, {0, 0},\n    {5, 6}, {5, 9}, {5, 8}, {3, 7}, {2, 9}\n};\n\n\n\n/* CBPY. Straightforward indexing */\n\nstatic const VLCtable cbpy_tab[16] =\n{\n    {3, 4}, {5, 5}, {4, 5}, {9, 4}, {3, 5}, {7, 4}, {2, 6}, {11, 4},\n    {2, 5}, {3, 6}, {5, 4}, {10, 4}, {4, 4}, {8, 4}, {6, 4}, {3, 2}\n};\n\nstatic const VLCtable cbpy_tab3[8] =\n{\n    {3, 3}, {1, 6}, {1, 5}, {2, 3}, {2, 5}, {3, 5}, {1, 3}, {1, 1}\n};\nstatic const VLCtable cbpy_tab2[4] =\n{\n    {1, 4}, {1, 3}, {1, 2}, {1, 1}\n};\n\n/* DCT coefficients. Four tables, two for last = 0, two for last = 1.\n   the sign bit must be added afterwards. */\n\n/* first part of coeffs for last = 0. 
Indexed by [run][level-1] */\n\nstatic const VLCtable coeff_tab0[2][12] =\n{\n    /* run = 0 */\n    {\n        {0x02, 2}, {0x0f, 4}, {0x15, 6}, {0x17, 7},\n        {0x1f, 8}, {0x25, 9}, {0x24, 9}, {0x21, 10},\n        {0x20, 10}, {0x07, 11}, {0x06, 11}, {0x20, 11}\n    },\n    /* run = 1 */\n    {\n        {0x06, 3}, {0x14, 6}, {0x1e, 8}, {0x0f, 10},\n        {0x21, 11}, {0x50, 12}, {0x00, 0}, {0x00, 0},\n        {0x00, 0}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    }\n};\n\n/* rest of coeffs for last = 0. indexing by [run-2][level-1] */\n\nstatic const VLCtable coeff_tab1[25][4] =\n{\n    /* run = 2 */\n    {\n        {0x0e, 4}, {0x1d, 8}, {0x0e, 10}, {0x51, 12}\n    },\n    /* run = 3 */\n    {\n        {0x0d, 5}, {0x23, 9}, {0x0d, 10}, {0x00, 0}\n    },\n    /* run = 4-26 */\n    {\n        {0x0c, 5}, {0x22, 9}, {0x52, 12}, {0x00, 0}\n    },\n    {\n        {0x0b, 5}, {0x0c, 10}, {0x53, 12}, {0x00, 0}\n    },\n    {\n        {0x13, 6}, {0x0b, 10}, {0x54, 12}, {0x00, 0}\n    },\n    {\n        {0x12, 6}, {0x0a, 10}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x11, 6}, {0x09, 10}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x10, 6}, {0x08, 10}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x16, 7}, {0x55, 12}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x15, 7}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x14, 7}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1c, 8}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1b, 8}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x21, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x20, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1f, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1e, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1d, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1c, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x1b, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    
},\n    {\n        {0x1a, 9}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x22, 11}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x23, 11}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x56, 12}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    },\n    {\n        {0x57, 12}, {0x00, 0}, {0x00, 0}, {0x00, 0}\n    }\n};\n\n/* first coeffs of last = 1. indexing by [run][level-1] */\n\nstatic const VLCtable coeff_tab2[2][3] =\n{\n    /* run = 0 */\n    {\n        {0x07, 4}, {0x19, 9}, {0x05, 11}\n    },\n    /* run = 1 */\n    {\n        {0x0f, 6}, {0x04, 11}, {0x00, 0}\n    }\n};\n\n/* rest of coeffs for last = 1. indexing by [run-2] */\n\nstatic const VLCtable coeff_tab3[40] =\n{\n    {0x0e, 6}, {0x0d, 6}, {0x0c, 6},\n    {0x13, 7}, {0x12, 7}, {0x11, 7}, {0x10, 7},\n    {0x1a, 8}, {0x19, 8}, {0x18, 8}, {0x17, 8},\n    {0x16, 8}, {0x15, 8}, {0x14, 8}, {0x13, 8},\n    {0x18, 9}, {0x17, 9}, {0x16, 9}, {0x15, 9},\n    {0x14, 9}, {0x13, 9}, {0x12, 9}, {0x11, 9},\n    {0x07, 10}, {0x06, 10}, {0x05, 10}, {0x04, 10},\n    {0x24, 11}, {0x25, 11}, {0x26, 11}, {0x27, 11},\n    {0x58, 12}, {0x59, 12}, {0x5a, 12}, {0x5b, 12},\n    {0x5c, 12}, {0x5d, 12}, {0x5e, 12}, {0x5f, 12},\n    {0x00, 0}\n};\n\n/* New tables for Intra luminance coefficients. Same codewords,\n   different meaning */\n\n/* Coeffs for last = 0, run = 0. Indexed by [level-1] */\n\nstatic const VLCtable coeff_tab4[27] =\n{\n    /* run = 0 */\n    {0x02, 2}, {0x06, 3}, {0x0f, 4}, {0x0d, 5},\n    {0x0c, 5}, {0x15, 6}, {0x13, 6}, {0x12, 6},\n    {0x17, 7}, {0x1f, 8}, {0x1e, 8}, {0x1d, 8},\n    {0x25, 9}, {0x24, 9}, {0x23, 9}, {0x21, 9},\n    {0x21, 10}, {0x20, 10}, {0x0f, 10}, {0x0e, 10},\n    {0x07, 11}, {0x06, 11}, {0x20, 11}, {0x21, 11},\n    {0x50, 12}, {0x51, 12}, {0x52, 12}\n};\n\n/* Coeffs for last = 0, run = 1. 
Indexed by [level-1] */\n\nstatic const VLCtable coeff_tab5[10] =\n{\n    {0x0e, 4}, {0x14, 6}, {0x16, 7}, {0x1c, 8},\n    {0x20, 9}, {0x1f, 9}, {0x0d, 10}, {0x22, 11},\n    {0x53, 12}, {0x55, 12}\n};\n\n/* Coeffs for last = 0, run = 2 -> 9. Indexed by [run-2][level-1] */\n\nstatic const VLCtable coeff_tab6[8][5] =\n{\n    /* run = 2 */\n    {\n        {0x0b, 5}, {0x15, 7}, {0x1e, 9}, {0x0c, 10},\n        {0x56, 12}\n    },\n    /* run = 3 */\n    {\n        {0x11, 6}, {0x1b, 8}, {0x1d, 9}, {0x0b, 10},\n        {0x00, 0}\n    },\n    /* run = 4 */\n    {\n        {0x10, 6}, {0x22, 9}, {0x0a, 10}, {0x00, 0},\n        {0x00, 0}\n    },\n    /* run = 5 */\n    {\n        {0x0d, 6}, {0x1c, 9}, {0x08, 10}, {0x00, 0},\n        {0x00, 0}\n    },\n    /* run = 6 */\n    {\n        {0x12, 7}, {0x1b, 9}, {0x54, 12}, {0x00, 0},\n        {0x00, 0}\n    },\n    /* run = 7 */\n    {\n        {0x14, 7}, {0x1a, 9}, {0x57, 12}, {0x00, 0},\n        {0x00, 0}\n    },\n    /* run = 8 */\n    {\n        {0x19, 8}, {0x09, 10}, {0x00, 0}, {0x00, 0},\n        {0x00, 0}\n    },\n    /* run = 9 */\n    {\n        {0x18, 8}, {0x23, 11}, {0x00, 0}, {0x00, 0},\n        {0x00, 0}\n    }\n};\n\n/* Coeffs for last = 0, run = 10 -> 14. Indexed by [run-10] */\n\nstatic const VLCtable coeff_tab7[5] =\n{\n    {0x17, 8}, {0x19, 9}, {0x18, 9}, {0x07, 10},\n    {0x58, 12}\n};\n\n/* Coeffs for last = 1, run = 0. Indexed by [level-1] */\n\nstatic const VLCtable coeff_tab8[8] =\n{\n    {0x07, 4}, {0x0c, 6}, {0x16, 8}, {0x17, 9},\n    {0x06, 10}, {0x05, 11}, {0x04, 11}, {0x59, 12}\n};\n\n/* Coeffs for last = 1, run = 1 -> 6. 
Indexed by [run-1][level-1] */\n\nstatic const VLCtable coeff_tab9[6][3] =\n{\n    /* run = 1 */\n    {\n        {0x0f, 6}, {0x16, 9}, {0x05, 10}\n    },\n    /* run = 2 */\n    {\n        {0x0e, 6}, {0x04, 10}, {0x00, 0}\n    },\n    /* run = 3 */\n    {\n        {0x11, 7}, {0x24, 11}, {0x00, 0}\n    },\n    /* run = 4 */\n    {\n        {0x10, 7}, {0x25, 11}, {0x00, 0}\n    },\n    /* run = 5 */\n    {\n        {0x13, 7}, {0x5a, 12}, {0x00, 0}\n    },\n    /* run = 6 */\n    {\n        {0x15, 8}, {0x5b, 12}, {0x00, 0}\n    }\n};\n\n/* Coeffs for last = 1, run = 7 -> 20. Indexed by [run-7] */\n\nstatic const VLCtable coeff_tab10[14] =\n{\n    {0x14, 8}, {0x13, 8}, {0x1a, 8}, {0x15, 9},\n    {0x14, 9}, {0x13, 9}, {0x12, 9}, {0x11, 9},\n    {0x26, 11}, {0x27, 11}, {0x5c, 12}, {0x5d, 12},\n    {0x5e, 12}, {0x5f, 12}\n};\n\n\n#ifndef NO_RVLC\n/* RVLC tables */\n/* DCT coefficients. Four tables, two for last = 0, two for last = 1.\n   the sign bit must be added afterwards. */\n\n/* DCT  coeffs (intra) for last = 0.  
*/\n\n/* Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab1[27] =\n{\n    /* run = 0 */\n    {     0x6,  3},\n    {     0x7,  3},\n    {     0xa,  4},\n    {     0x9,  5},\n    {    0x14,  6},\n    {    0x15,  6},\n    {    0x34,  7},\n    {    0x74,  8},\n    {    0x75,  8},\n    {    0xdd,  9},\n    {    0xec,  9},\n    {   0x1ec, 10},\n    {   0x1ed, 10},\n    {   0x1f4, 10},\n    {   0x3ec, 11},\n    {   0x3ed, 11},\n    {   0x3f4, 11},\n    {   0x77d, 12},\n    {   0x7bc, 12},\n    {   0xfbd, 13},\n    {   0xfdc, 13},\n    {   0x7bd, 12},\n    {   0xfdd, 13},\n    {  0x1fbd, 14},\n    {  0x1fdc, 14},\n    {  0x1fdd, 14},\n    {  0x1ffc, 15}\n};\n\n\n/* Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab2[13] =\n{\n    /* run = 1 */\n    {     0x1,  4},\n    {     0x8,  5},\n    {    0x2d,  7},\n    {    0x6c,  8},\n    {    0x6d,  8},\n    {    0xdc,  9},\n    {   0x1dd, 10},\n    {   0x3dc, 11},\n    {   0x3dd, 11},\n    {   0x77c, 12},\n    {   0xfbc, 13},\n    {  0x1f7d, 14},\n    {  0x1fbc, 14}\n};\n\n\n/* Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab3[11] =\n{\n    /* run = 2 */\n\n    {     0x4,  5},\n    {    0x2c,  7},\n    {    0xbc,  9},\n    {   0x1dc, 10},\n    {   0x3bc, 11},\n    {   0x3bd, 11},\n    {   0xefd, 13},\n    {   0xf7c, 13},\n    {   0xf7d, 13},\n    {  0x1efd, 14},\n    {  0x1f7c, 14}\n};\n\n\n/* Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab4[9] =\n{\n    /* run = 3 */\n    {     0x5,  5},\n    {    0x5c,  8},\n    {    0xbd,  9},\n    {   0x37d, 11},\n    {   0x6fc, 12},\n    {   0xefc, 13},\n    {  0x1dfd, 14},\n    {  0x1efc, 14},\n    {  0x1ffd, 15}\n};\n\n\n/* Indexed by [run-4][level-1] */\n\nstatic const VLCtable coeff_RVLCtab5[2][6] =\n{\n    /* run = 4 */\n    {\n        {     0xc,  6},\n        {    0x5d,  8},\n        {   0x1bd, 10},\n        {   0x3fd, 12},\n        {   0x6fd, 12},\n        {  0x1bfd, 14}\n    },\n    /* run = 5 */\n    {\n        {     0xd,  6},\n 
       {    0x7d,  9},\n        {   0x2fc, 11},\n        {   0x5fc, 12},\n        {  0x1bfc, 14},\n        {  0x1dfc, 14}\n    }\n};\n\n\n/* Indexed by [run-6][level-1]       */\n\nstatic const VLCtable coeff_RVLCtab6[2][5] =\n{\n\n    /* run = 6 */\n    {\n        {    0x1c,  7},\n        {   0x17c, 10},\n        {   0x2fd, 11},\n        {   0x5fd, 12},\n        {  0x2ffc, 15}\n    },\n    /* run = 7 */\n    {\n        {    0x1d,  7},\n        {   0x17d, 10},\n        {   0x37c, 11},\n        {   0xdfd, 13},\n        {  0x2ffd, 15}\n    }\n\n};\n/* Indexed by [run-8][level-1] */\n\nstatic const VLCtable coeff_RVLCtab7[2][4] =\n{\n    /* run = 8 */\n    {\n        {    0x3c,  8},\n        {   0x1bc, 10},\n        {   0xbfd, 13},\n        {  0x17fd, 14}\n    },\n    /* run = 9 */\n    {\n        {    0x3d,  8},\n        {   0x1fd, 11},\n        {   0xdfc, 13},\n        {  0x37fc, 15},\n    }\n};\n\n\n\n/* Indexed by [run-10][level-1] */\n\nstatic const VLCtable coeff_RVLCtab8[3][2] =\n{\n    /* run = 10 */\n    {\n        {    0x7c,  9},\n        {   0x3fc, 12}\n    },\n    /* run = 11 */\n    {\n        {    0xfc, 10},\n        {   0xbfc, 13}\n    },\n    /* run = 12 */\n    {\n        {    0xfd, 10},\n        {  0x37fd, 15}\n    }\n};\n\n\n/* Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab9[7] =\n{\n    /* run = 13 -> 19 */\n    {   0x1fc, 11},\n    {   0x7fc, 13},\n    {   0x7fd, 13},\n    {   0xffc, 14},\n    {   0xffd, 14},\n    {  0x17fc, 14},\n    {  0x3bfc, 15}\n};\n\n\n\n/* first coeffs of last = 1. 
indexing by [run][level-1] */\n\nstatic const VLCtable coeff_RVLCtab10[2][5] =\n{\n    /* run = 0 */\n    {\n        {     0xb,  4},\n        {    0x78,  8},\n        {   0x3f5, 11},\n        {   0xfec, 13},\n        {  0x1fec, 14}\n    },\n    /* run = 1 */\n    {\n        {    0x12,  5},\n        {    0xed,  9},\n        {   0x7dc, 12},\n        {  0x1fed, 14},\n        {  0x3bfd, 15}\n    }\n\n};\n\nstatic const VLCtable coeff_RVLCtab11[3] =\n{\n    /* run = 2 */\n    {    0x13,  5},\n    {   0x3f8, 11},\n    {  0x3dfc, 15}\n\n};\n\nstatic const VLCtable coeff_RVLCtab12[11][2] =\n{\n    /* run = 3 */\n    {\n        {    0x18,  6},\n        {   0x7dd, 12}\n    },\n    /* run = 4 */\n    {\n        {    0x19,  6},\n        {   0x7ec, 12}\n    },\n    /* run = 5 */\n    {\n        {    0x22,  6},\n        {   0xfed, 13}\n    },\n    /* run = 6 */\n    {\n        {    0x23,  6},\n        {   0xff4, 13}\n    },\n    /* run = 7 */\n    {\n        {    0x35,  7},\n        {   0xff5, 13}\n    },\n    /* run = 8 */\n    {\n        {    0x38,  7},\n        {   0xff8, 13}\n    },\n    /* run = 9 */\n    {\n        {    0x39,  7},\n        {   0xff9, 13}\n    },\n    /* run = 10 */\n    {\n        {    0x42,  7},\n        {  0x1ff4, 14}\n    },\n    /* run = 11 */\n    {\n        {    0x43,  7},\n        {  0x1ff5, 14}\n    },\n    /* run = 12 */\n    {\n        {    0x79,  8},\n        {  0x1ff8, 14}\n    },\n    /* run = 13 */\n    {\n        {    0x82,  8},\n        {  0x3dfd, 15}\n    }\n\n};\n\nstatic const VLCtable coeff_RVLCtab13[32] =\n{\n    /* run = 14 -> 44 */\n    {    0x83,  8},\n    {    0xf4,  9},\n    {    0xf5,  9},\n    {    0xf8,  9},\n    {    0xf9,  9},\n    {   0x102,  9},\n    {   0x103,  9},\n    {   0x1f5, 10},\n    {   0x1f8, 10},\n    {   0x1f9, 10},\n    {   0x202, 10},\n    {   0x203, 10},\n    {   0x3f9, 11},\n    {   0x402, 11},\n    {   0x403, 11},\n    {   0x7ed, 12},\n    {   0x7f4, 12},\n    {   0x7f5, 12},\n    {   0x7f8, 12},\n    {   
0x7f9, 12},\n    {   0x802, 12},\n    {   0x803, 12},\n    {  0x1002, 13},\n    {  0x1003, 13},\n    {  0x1ff9, 14},\n    {  0x2002, 14},\n    {  0x2003, 14},\n    {  0x3efc, 15},\n    {  0x3efd, 15},\n    {  0x3f7c, 15},\n    {  0x3f7d, 15}\n};\n\n\n\n/* Coeffs for last = 0, run = 0. Indexed by [level-1] */\n\nstatic const VLCtable coeff_RVLCtab14[19] =\n{\n    /* run = 0 */\n    {     0x6,  3},\n    {     0x1,  4},\n    {     0x4,  5},\n    {    0x1c,  7},\n    {    0x3c,  8},\n    {    0x3d,  8},\n    {    0x7c,  9},\n    {    0xfc, 10},\n    {    0xfd, 10},\n    {   0x1fc, 11},\n    {   0x1fd, 11},\n    {   0x3fc, 12},\n    {   0x7fc, 13},\n    {   0x7fd, 13},\n    {   0xbfc, 13},\n    {   0xbfd, 13},\n    {   0xffc, 14},\n    {   0xffd, 14},\n    {  0x1ffc, 15}\n};\n\nstatic const VLCtable coeff_RVLCtab15[10] =\n{\n    /* run = 1 */\n    {     0x7,  3},\n    {     0xc,  6},\n    {    0x5c,  8},\n    {    0x7d,  9},\n    {   0x17c, 10},\n    {   0x2fc, 11},\n    {   0x3fd, 12},\n    {   0xdfc, 13},\n    {  0x17fc, 14},\n    {  0x17fd, 14}\n};\n\nstatic const VLCtable coeff_RVLCtab16[2][7] =\n{\n    /* run = 2 */\n    {\n        {     0xa,  4},\n        {    0x1d,  7},\n        {    0xbc,  9},\n        {   0x2fd, 11},\n        {   0x5fc, 12},\n        {  0x1bfc, 14},\n        {  0x1bfd, 14}\n    },\n    /* run = 3 */\n    {\n        {     0x5,  5},\n        {    0x5d,  8},\n        {   0x17d, 10},\n        {   0x5fd, 12},\n        {   0xdfd, 13},\n        {  0x1dfc, 14},\n        {  0x1ffd, 15}\n    }\n};\n\nstatic const VLCtable coeff_RVLCtab17[5] =\n{\n    /* run = 4 */\n    {     0x8,  5},\n    {    0x6c,  8},\n    {   0x37c, 11},\n    {   0xefc, 13},\n    {  0x2ffc, 15}\n};\n\nstatic const VLCtable coeff_RVLCtab18[3][4] =\n{\n    /* run = 5 */\n    {\n        {     0x9,  5},\n        {    0xbd,  9},\n        {   0x37d, 11},\n        {   0xefd, 13}\n    },\n    /* run = 6 */\n    {\n        {     0xd,  6},\n        {   0x1bc, 10},\n        {   0x6fc, 12},\n   
     {  0x1dfd, 14}\n    },\n    /* run = 7 */\n    {\n        {    0x14,  6},\n        {   0x1bd, 10},\n        {   0x6fd, 12},\n        {  0x2ffd, 15}\n    }\n};\n\nstatic const VLCtable coeff_RVLCtab19[2][3] =\n{\n    /* run = 8 */\n    {\n        {    0x15,  6},\n        {   0x1dc, 10},\n        {   0xf7c, 13}\n    },\n    /* run = 9 */\n    {\n        {    0x2c,  7},\n        {   0x1dd, 10},\n        {  0x1efc, 14}\n    }\n};\n\nstatic const VLCtable coeff_RVLCtab20[8][2] =\n{\n    /* run = 10 */\n    {\n        {    0x2d,  7},\n        {   0x3bc, 11}\n    },\n    /* run = 11 */\n    {\n        {    0x34,  7},\n        {   0x77c, 12}\n    },\n    /* run = 12 */\n    {\n        {    0x6d,  8},\n        {   0xf7d, 13}\n    },\n    /* run = 13 */\n    {\n        {    0x74,  8},\n        {  0x1efd, 14}\n    },\n    /* run = 14 */\n    {\n        {    0x75,  8},\n        {  0x1f7c, 14}\n    },\n    /* run = 15 */\n    {\n        {    0xdc,  9},\n        {  0x1f7d, 14}\n    },\n    /* run = 16 */\n    {\n        {    0xdd,  9},\n        {  0x1fbc, 14}\n    },\n    /* run = 17 */\n    {\n        {    0xec,  9},\n        {  0x37fc, 15}\n    }\n};\n\nstatic const VLCtable coeff_RVLCtab21[21] =\n{\n    /* run = 18 -> 38 */\n    {   0x1ec, 10},\n    {   0x1ed, 10},\n    {   0x1f4, 10},\n    {   0x3bd, 11},\n    {   0x3dc, 11},\n    {   0x3dd, 11},\n    {   0x3ec, 11},\n    {   0x3ed, 11},\n    {   0x3f4, 11},\n    {   0x77d, 12},\n    {   0x7bc, 12},\n    {   0x7bd, 12},\n    {   0xfbc, 13},\n    {   0xfbd, 13},\n    {   0xfdc, 13},\n    {   0xfdd, 13},\n    {  0x1fbd, 14},\n    {  0x1fdc, 14},\n    {  0x1fdd, 14},\n    {  0x37fd, 15},\n    {  0x3bfc, 15}\n};\n\n\n/* first coeffs of last = 1. 
indexing by [run][level-1] */\n\nstatic const VLCtable coeff_RVLCtab22[2][5] =\n{\n    /* run = 0 */\n    {\n        {     0xb,  4},\n        {    0x78,  8},\n        {   0x3f5, 11},\n        {   0xfec, 13},\n        {  0x1fec, 14}\n    },\n    /* run = 1 */\n    {\n        {    0x12,  5},\n        {    0xed,  9},\n        {   0x7dc, 12},\n        {  0x1fed, 14},\n        {  0x3bfd, 15}\n    }\n\n};\n\nstatic const VLCtable coeff_RVLCtab23[3] =\n{\n    /* run = 2 */\n    {    0x13,  5},\n    {   0x3f8, 11},\n    {  0x3dfc, 15}\n\n};\n\nstatic const VLCtable coeff_RVLCtab24[11][2] =\n{\n    /* run = 3 */\n    {\n        {    0x18,  6},\n        {   0x7dd, 12}\n    },\n    /* run = 4 */\n    {\n        {    0x19,  6},\n        {   0x7ec, 12}\n    },\n    /* run = 5 */\n    {\n        {    0x22,  6},\n        {   0xfed, 13}\n    },\n    /* run = 6 */\n    {\n        {    0x23,  6},\n        {   0xff4, 13}\n    },\n    /* run = 7 */\n    {\n        {    0x35,  7},\n        {   0xff5, 13}\n    },\n    /* run = 8 */\n    {\n        {    0x38,  7},\n        {   0xff8, 13}\n    },\n    /* run = 9 */\n    {\n        {    0x39,  7},\n        {   0xff9, 13}\n    },\n    /* run = 10 */\n    {\n        {    0x42,  7},\n        {  0x1ff4, 14}\n    },\n    /* run = 11 */\n    {\n        {    0x43,  7},\n        {  0x1ff5, 14}\n    },\n    /* run = 12 */\n    {\n        {    0x79,  8},\n        {  0x1ff8, 14}\n    },\n    /* run = 13 */\n    {\n        {    0x82,  8},\n        {  0x3dfd, 15}\n    }\n\n};\n\nstatic const VLCtable coeff_RVLCtab25[32] =\n{\n    /* run = 14 -> 44 */\n    {    0x83,  8},\n    {    0xf4,  9},\n    {    0xf5,  9},\n    {    0xf8,  9},\n    {    0xf9,  9},\n    {   0x102,  9},\n    {   0x103,  9},\n    {   0x1f5, 10},\n    {   0x1f8, 10},\n    {   0x1f9, 10},\n    {   0x202, 10},\n    {   0x203, 10},\n    {   0x3f9, 11},\n    {   0x402, 11},\n    {   0x403, 11},\n    {   0x7ed, 12},\n    {   0x7f4, 12},\n    {   0x7f5, 12},\n    {   0x7f8, 12},\n    {   
0x7f9, 12},\n    {   0x802, 12},\n    {   0x803, 12},\n    {  0x1002, 13},\n    {  0x1003, 13},\n    {  0x1ff9, 14},\n    {  0x2002, 14},\n    {  0x2003, 14},\n    {  0x3efc, 15},\n    {  0x3efd, 15},\n    {  0x3f7c, 15},\n    {  0x3f7d, 15}\n};\n\n#endif /* NO_RVLC */\n\n#endif /* _VLC_ENC_TAB_H_ */\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/vlc_encode.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n/******************************************************************************\n*\n* This software module was originally developed by\n*\n* Robert Danielsen (Telenor / ACTS-MoMuSys).\n*\n* and edited by\n*\n* Luis Ducla-Soares (IST / ACTS-MoMuSys).\n* Cor Quist (KPN / ACTS-MoMuSys).\n*\n* in the course of development of the MPEG-4 Video (ISO/IEC 14496-2) standard.\n* This software module is an implementation of a part of one or more MPEG-4\n* Video (ISO/IEC 14496-2) tools as specified by the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* ISO/IEC gives users of the MPEG-4 Video (ISO/IEC 14496-2) standard free\n* license to this software module or modifications thereof for use in hardware\n* or software products claiming conformance to the MPEG-4 Video (ISO/IEC\n* 14496-2) standard.\n*\n* Those intending to use this software module in hardware or software products\n* are advised that its use may infringe existing patents. The original\n* developer of this software module and his/her company, the subsequent\n* editors and their companies, and ISO/IEC have no liability for use of this\n* software module or modifications thereof in an implementation. 
Copyright is\n* not released for non MPEG-4 Video (ISO/IEC 14496-2) standard conforming\n* products.\n*\n* ACTS-MoMuSys partners retain full right to use the code for his/her own\n* purpose, assign or donate the code to a third party and to inhibit third\n* parties from using the code for non MPEG-4 Video (ISO/IEC 14496-2) standard\n* conforming products. This copyright notice must be included in all copies or\n* derivative works.\n*\n* Copyright (c) 1997\n*\n*****************************************************************************/\n\n/***********************************************************HeaderBegin*******\n*\n* File: putvlc.c\n*\n* Author:   Robert Danielsen, Telenor R&D\n* Created:  07.07.96\n*\n* Description: Functions for writing to bitstream\n*\n* Notes:    Same kind of tables as in the MPEG-2 software simulation\n*       group software.\n*\n* Modified:\n*   28.10.96 Robert Danielsen: Added PutCoeff_Intra(), renamed\n*           PutCoeff() to PutCoeff_Inter().\n*   06.11.96 Robert Danielsen: Added PutMCBPC_sep()\n*      01.05.97 Luis Ducla-Soares: added PutCoeff_Intra_RVLC() and\n*                                  PutCoeff_Inter_RVLC().\n*\n***********************************************************HeaderEnd*********/\n\n/************************    INCLUDE FILES    ********************************/\n\n\n#include \"mp4lib_int.h\"\n#include \"mp4enc_lib.h\"\n#include \"vlc_enc_tab.h\"\n#include \"bitstream_io.h\"\n#include \"m4venc_oscl.h\"\n#include \"vlc_encode_inline.h\"\n\ntypedef void (*BlockCodeCoeffPtr)(RunLevelBlock*, BitstreamEncVideo*, Int, Int, UChar) ;\n\nconst static Int mode_MBtype[] =\n{\n    3,\n    0,\n    4,\n    1,\n    2,\n};\n\nconst static Int zigzag_inv[NCOEFF_BLOCK] =\n{\n    0,  1,  8, 16,  9,  2,  3, 10,\n    17, 24, 32, 25, 18, 11,  4,  5,\n    12, 19, 26, 33, 40, 48, 41, 34,\n    27, 20, 13,  6,  7, 14, 21, 28,\n    35, 42, 49, 56, 57, 50, 43, 36,\n    29, 22, 15, 23, 30, 37, 44, 51,\n    58, 59, 52, 45, 38, 31, 39, 46,\n  
  53, 60, 61, 54, 47, 55, 62, 63\n};\n\n/* Horizontal zigzag inverse */\nconst static Int zigzag_h_inv[NCOEFF_BLOCK] =\n{\n    0, 1, 2, 3, 8, 9, 16, 17,\n    10, 11, 4, 5, 6, 7, 15, 14,\n    13, 12, 19, 18, 24, 25, 32, 33,\n    26, 27, 20, 21, 22, 23, 28, 29,\n    30, 31, 34, 35, 40, 41, 48, 49,\n    42, 43, 36, 37, 38, 39, 44, 45,\n    46, 47, 50, 51, 56, 57, 58, 59,\n    52, 53, 54, 55, 60, 61, 62, 63\n};\n\n/* Vertical zigzag inverse */\nconst static Int zigzag_v_inv[NCOEFF_BLOCK] =\n{\n    0, 8, 16, 24, 1, 9, 2, 10,\n    17, 25, 32, 40, 48, 56, 57, 49,\n    41, 33, 26, 18, 3, 11, 4, 12,\n    19, 27, 34, 42, 50, 58, 35, 43,\n    51, 59, 20, 28, 5, 13, 6, 14,\n    21, 29, 36, 44, 52, 60, 37, 45,\n    53, 61, 22, 30, 7, 15, 23, 31,\n    38, 46, 54, 62, 39, 47, 55, 63\n};\n\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n    Int PutCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream);\n    Int PutMCBPC_Inter(Int cbpc, Int mode, BitstreamEncVideo *bitstream);\n    Int PutMCBPC_Intra(Int cbpc, Int mode, BitstreamEncVideo *bitstream);\n    Int PutMV(Int mvint, BitstreamEncVideo *bitstream);\n    Int PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream);\n    Int PutDCsize_lum(Int size, BitstreamEncVideo *bitstream);\n#ifndef NO_RVLC\n    Int PutCoeff_Inter_RVLC(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Inter_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Intra_RVLC(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutCoeff_Intra_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n#endif\n    Int PutRunCoeff_Inter(Int 
run, Int level, BitstreamEncVideo *bitstream);\n    Int PutRunCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutRunCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutRunCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutLevelCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutLevelCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutLevelCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream);\n    Int PutLevelCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream);\n\n    void RunLevel(VideoEncData *video, Int intra, Int intraDC_decision, Int ncoefblck[]);\n    Int IntraDC_dpcm(Int val, Int lum, BitstreamEncVideo *bitstream);\n    Void DCACPred(VideoEncData *video, UChar Mode, Int *intraDC_decision, Int intraDCVlcQP);\n    Void find_pmvs(VideoEncData *video, Int block, Int *mvx, Int *mvy);\n    Void  WriteMVcomponent(Int f_code, Int dmv, BitstreamEncVideo *bs);\n    static Bool IntraDCSwitch_Decision(Int Mode, Int intra_dc_vlc_threshold, Int intraDCVlcQP);\n\n    Void ScaleMVD(Int  f_code, Int  diff_vector, Int  *residual, Int  *vlc_code_mag);\n\n#ifdef __cplusplus\n}\n#endif\n\nInt\nPutDCsize_lum(Int size, BitstreamEncVideo *bitstream)\n{\n    Int length;\n\n    if (!(size >= 0 && size < 13))\n        return -1;\n\n    length = DCtab_lum[size].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, DCtab_lum[size].code);\n\n    return length;\n}\n\nInt\nPutDCsize_chrom(Int size, BitstreamEncVideo *bitstream)\n{\n    Int length;\n\n    if (!(size >= 0 && size < 13))\n        return -1;\n    length = DCtab_chrom[size].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, DCtab_chrom[size].code);\n\n    return length;\n}\n\nInt\nPutMV(Int mvint, BitstreamEncVideo *bitstream)\n{\n    Int sign = 0;\n    Int absmv;\n    Int length;\n\n    if (mvint > 32)\n    {\n        absmv = -mvint + 65;\n       
 sign = 1;\n    }\n    else\n        absmv = mvint;\n\n    length = mvtab[absmv].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, mvtab[absmv].code);\n\n    if (mvint != 0)\n    {\n        BitstreamPut1Bits(bitstream, sign);\n        return (length + 1);\n    }\n    else\n        return length;\n}\n\nInt\nPutMCBPC_Intra(Int cbp, Int mode, BitstreamEncVideo *bitstream)\n{\n    Int ind;\n    Int length;\n\n    ind = ((mode_MBtype[mode] >> 1) & 3) | ((cbp & 3) << 2);\n\n    length = mcbpc_intra_tab[ind].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, mcbpc_intra_tab[ind].code);\n\n    return length;\n}\n\nInt\nPutMCBPC_Inter(Int cbp, Int mode, BitstreamEncVideo *bitstream)\n{\n    Int ind;\n    Int length;\n\n    ind = (mode_MBtype[mode] & 7) | ((cbp & 3) << 3);\n\n    length = mcbpc_inter_tab[ind].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, mcbpc_inter_tab[ind].code);\n\n    return length;\n}\n\nInt\nPutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream)\n{\n    Int ind;\n    Int length;\n\n    if ((intra == 0))\n        cbpy = 15 - cbpy;\n\n    ind = cbpy;\n\n    length = cbpy_tab[ind].len;\n    if (length)\n        BitstreamPutBits(bitstream, length, (UInt)cbpy_tab[ind].code);\n\n    return length;\n}\n\n/* 5/16/01, break up function for last and not-last coefficient */\n/* Note:::: I checked the ARM assembly for if( run > x && run < y) type\n    of code, they do a really good job compiling it to if( (UInt)(run-x) < y-x).\n    No need to hand-code it!!!!!, 6/1/2001 */\n\nInt PutCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 13)\n    {\n        length = coeff_tab0[run][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);\n    }\n    else if (run > 1 && run < 27 && level < 5)\n    {\n        length = coeff_tab1[run-2][level-1].len;\n        if (length)\n            
BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);\n    }\n\n    return length;\n}\n\nInt PutCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 4)\n    {\n        length = coeff_tab2[run][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);\n    }\n    else if (run > 1 && run < 42 && level == 1)\n    {\n        length = coeff_tab3[run-2].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);\n    }\n\n    return length;\n}\n\n/* 5/16/01, break up function for last and not-last coefficient */\n\nInt PutCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 28)\n    {\n        length = coeff_tab4[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);\n    }\n    else if (run == 1 && level < 11)\n    {\n        length = coeff_tab5[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);\n    }\n    else if (run > 1 && run < 10 && level < 6)\n    {\n        length = coeff_tab6[run-2][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);\n    }\n    else if (run > 9 && run < 15 && level == 1)\n    {\n        length = coeff_tab7[run-10].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);\n    }\n\n    return length;\n}\n\nInt PutCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 9)\n    {\n        length = coeff_tab8[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);\n    }\n    else if (run > 0 && run < 7 && level < 4)\n  
  {\n        length = coeff_tab9[run-1][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);\n    }\n    else if (run > 6 && run < 21 && level == 1)\n    {\n        length = coeff_tab10[run-7].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);\n    }\n\n    return length;\n}\n\n/* 5/16/01, break up function for last and not-last coefficient */\n#ifndef NO_RVLC\nInt PutCoeff_Inter_RVLC(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 20)\n    {\n        length =  coeff_RVLCtab14[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab14[level-1].code);\n    }\n    else if (run == 1 && level < 11)\n    {\n        length = coeff_RVLCtab15[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab15[level-1].code);\n    }\n    else if (run > 1 && run < 4 && level < 8)\n    {\n        length = coeff_RVLCtab16[run-2][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab16[run-2][level-1].code);\n    }\n    else if (run == 4 && level < 6)\n    {\n        length = coeff_RVLCtab17[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab17[level-1].code);\n    }\n    else if (run > 4 && run < 8 && level < 5)\n    {\n        length = coeff_RVLCtab18[run-5][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab18[run-5][level-1].code);\n    }\n    else if (run > 7 && run < 10 && level < 4)\n    {\n        length = coeff_RVLCtab19[run-8][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab19[run-8][level-1].code);\n    }\n    else if (run > 9 && run < 18 && level < 3)\n    {\n        length = 
coeff_RVLCtab20[run-10][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab20[run-10][level-1].code);\n    }\n    else if (run > 17 && run < 39 && level == 1)\n    {\n        length = coeff_RVLCtab21[run-18].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab21[run-18].code);\n    }\n\n    return length;\n}\n\nInt PutCoeff_Inter_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run >= 0 && run < 2 && level < 6)\n    {\n        length = coeff_RVLCtab22[run][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab22[run][level-1].code);\n    }\n    else if (run == 2 && level < 4)\n    {\n        length = coeff_RVLCtab23[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab23[level-1].code);\n    }\n    else if (run > 2 && run < 14 && level < 3)\n    {\n        length = coeff_RVLCtab24[run-3][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab24[run-3][level-1].code);\n    }\n    else if (run > 13 && run < 45 && level == 1)\n    {\n        length = coeff_RVLCtab25[run-14].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab25[run-14].code);\n    }\n\n    return length;\n}\n\n/* 5/16/01, break up function for last and not-last coefficient */\n\nInt PutCoeff_Intra_RVLC(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 28)\n    {\n        length = coeff_RVLCtab1[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab1[level-1].code);\n    }\n    else if (run == 1 && level < 14)\n    {\n        length = coeff_RVLCtab2[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab2[level-1].code);\n    }\n  
  else if (run == 2 && level < 12)\n    {\n        length = coeff_RVLCtab3[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab3[level-1].code);\n    }\n    else if (run == 3 && level < 10)\n    {\n        length = coeff_RVLCtab4[level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab4[level-1].code);\n    }\n    else if (run > 3 && run < 6 && level < 7)\n    {\n        length = coeff_RVLCtab5[run-4][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab5[run-4][level-1].code);\n    }\n    else if (run > 5 && run < 8 && level < 6)\n    {\n        length = coeff_RVLCtab6[run-6][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab6[run-6][level-1].code);\n    }\n    else if (run > 7 && run < 10 && level < 5)\n    {\n        length = coeff_RVLCtab7[run-8][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab7[run-8][level-1].code);\n\n    }\n    else if (run > 9 && run < 13 && level < 3)\n    {\n        length = coeff_RVLCtab8[run-10][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab8[run-10][level-1].code);\n    }\n    else if (run > 12 && run < 20 && level == 1)\n    {\n        length = coeff_RVLCtab9[run-13].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab9[run-13].code);\n    }\n    return length;\n}\n\nInt PutCoeff_Intra_RVLC_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run >= 0 && run < 2 && level < 6)\n    {\n        length = coeff_RVLCtab10[run][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab10[run][level-1].code);\n    }\n    else if (run == 2 && level < 4)\n    {\n        length = coeff_RVLCtab11[level-1].len;\n      
  if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab11[level-1].code);\n    }\n    else if (run > 2 && run < 14 && level < 3)\n    {\n        length = coeff_RVLCtab12[run-3][level-1].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab12[run-3][level-1].code);\n    }\n    else if (run > 13 && run < 45 && level == 1)\n    {\n        length = coeff_RVLCtab13[run-14].len;\n        if (length)\n            BitstreamPutBits(bitstream, length, (UInt)coeff_RVLCtab13[run-14].code);\n    }\n    return length;\n}\n#endif\n\n/* The following is for 3-mode VLC */\n\nInt\nPutRunCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 13)\n    {\n        length = coeff_tab0[run][level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 1 && run < 27 && level < 5)\n    {\n        length = coeff_tab1[run-2][level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);\n            length += 9;\n        }\n    }\n    return length;\n}\n\nInt PutRunCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 4)\n    {\n        length = coeff_tab2[run][level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 1 && run < 42 && 
level == 1)\n    {\n        length = coeff_tab3[run-2].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);\n            length += 9;\n        }\n    }\n    return length;\n}\n\nInt PutRunCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 28)\n    {\n        length = coeff_tab4[level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);\n            length += 9;\n        }\n    }\n    else if (run == 1 && level < 11)\n    {\n        length = coeff_tab5[level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 1 && run < 10 && level < 6)\n    {\n        length = coeff_tab6[run-2][level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 9 && run < 15 && level == 1)\n    {\n        length = coeff_tab7[run-10].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);\n            length += 9;\n        }\n    }\n    return length;\n}\nInt PutRunCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo 
*bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 9)\n    {\n        length = coeff_tab8[level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 0 && run < 7 && level < 4)\n    {\n        length = coeff_tab9[run-1][level-1].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);\n            length += 9;\n        }\n    }\n    else if (run > 6 && run < 21 && level == 1)\n    {\n        length = coeff_tab10[run-7].len;\n        if (length)\n        {\n            BitstreamPutGT8Bits(bitstream, 7 + 2, 14/*3*/);\n            //BitstreamPutBits(bitstream, 2, 2);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);\n            length += 9;\n        }\n    }\n    return length;\n}\n\nInt\nPutLevelCoeff_Inter(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 13)\n    {\n        length = coeff_tab0[run][level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab0[run][level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 1 && run < 27 && level < 5)\n    {\n        length = coeff_tab1[run-2][level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab1[run-2][level-1].code);\n            length += 8;\n        }\n    }\n    return length;\n}\n\nInt PutLevelCoeff_Inter_Last(Int run, Int level, BitstreamEncVideo 
*bitstream)\n{\n    Int length = 0;\n\n    if (run < 2 && level < 4)\n    {\n        length = coeff_tab2[run][level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab2[run][level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 1 && run < 42 && level == 1)\n    {\n        length = coeff_tab3[run-2].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab3[run-2].code);\n            length += 8;\n        }\n    }\n    return length;\n}\n\nInt PutLevelCoeff_Intra(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 28)\n    {\n        length = coeff_tab4[level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab4[level-1].code);\n            length += 8;\n        }\n    }\n    else if (run == 1 && level < 11)\n    {\n        length = coeff_tab5[level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab5[level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 1 && run < 10 && level < 6)\n    {\n        length = coeff_tab6[run-2][level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab6[run-2][level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 9 && run < 15 && level == 1)\n    {\n        length = coeff_tab7[run-10].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab7[run-10].code);\n            length += 
8;\n        }\n    }\n    return length;\n}\nInt PutLevelCoeff_Intra_Last(Int run, Int level, BitstreamEncVideo *bitstream)\n{\n    Int length = 0;\n\n    if (run == 0 && level < 9)\n    {\n        length = coeff_tab8[level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab8[level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 0 && run < 7 && level < 4)\n    {\n        length = coeff_tab9[run-1][level-1].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab9[run-1][level-1].code);\n            length += 8;\n        }\n    }\n    else if (run > 6 && run < 21 && level == 1)\n    {\n        length = coeff_tab10[run-7].len;\n        if (length)\n        {\n            BitstreamPutBits(bitstream, 7 + 1, 6/*3*/);\n            BitstreamPutBits(bitstream, length, (UInt)coeff_tab10[run-7].code);\n            length += 8;\n        }\n    }\n    return length;\n}\n\n\n\n/* ======================================================================== */\n/*  Function : MBVlcEncode()                                                */\n/*  Date     : 09/10/2000                                                   */\n/*  Purpose  : Encode GOV Header                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified : 5/21/01, break up into smaller functions                     */\n/* ======================================================================== */\n#ifndef H263_ONLY\n/**************************************/\n/* Data Partitioning I-VOP Encoding   */\n/**************************************/\n\nvoid MBVlcEncodeDataPar_I_VOP(\n    VideoEncData *video,\n    Int ncoefblck[],\n    void 
*blkCodePtr)\n{\n\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    BitstreamEncVideo *bs2 = video->bitstream2;\n    BitstreamEncVideo *bs3 = video->bitstream3;\n    int i;\n    UChar Mode = video->headerInfo.Mode[video->mbnum];\n    UChar CBP;\n//  MacroBlock *MB=video->outputMB;\n    Int mbnum = video->mbnum;\n    Int intraDC_decision, DC;\n//  int temp;\n    Int dquant; /* 3/15/01 */\n    RunLevelBlock *RLB = video->RLB;\n    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;\n\n    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/\n    DCACPred(video, Mode, &intraDC_decision, video->QP_prev);\n\n    /* CBP, Run, Level, and Sign */\n    RunLevel(video, 1, intraDC_decision, ncoefblck);\n    CBP = video->headerInfo.CBP[mbnum];\n\n    /* Compute DQuant */\n    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/\n\n    video->QP_prev = video->QPMB[mbnum];\n\n    if (dquant && Mode == MODE_INTRA)\n    {\n        Mode = MODE_INTRA_Q;\n    }\n\n    if (dquant >= 0)\n        dquant = (PV_ABS(dquant) + 1);\n    else\n        dquant = (PV_ABS(dquant) - 1);\n\n    /* FIRST PART: ALL TO BS1 */\n\n    PutMCBPC_Intra(CBP, Mode, bs1); /* MCBPC */\n\n    if (Mode == MODE_INTRA_Q)\n        /*  MAY NEED TO CHANGE DQUANT HERE  */\n        BitstreamPutBits(bs1, 2, dquant);  /* dquant*/\n\n\n    if (intraDC_decision == 0)\n    {\n        for (i = 0; i < 6; i++)\n        {\n            DC = video->RLB[i].level[0];\n            if (video->RLB[i].s[0])\n                DC = -DC;\n            if (i < 4)\n                /*temp =*/ IntraDC_dpcm(DC, 1, bs1);        /* dct_dc_size_luminance, */\n            else                                    /* dct_dc_differential, and */\n                /*temp =*/ IntraDC_dpcm(DC, 0, bs1);        /* marker bit */\n        }\n    }\n\n    /* SECOND PART: ALL TO BS2*/\n\n    BitstreamPut1Bits(bs2, video->acPredFlag[video->mbnum]);    /* ac_pred_flag 
*/\n\n    /*temp=*/\n    PutCBPY(CBP >> 2, (Char)(1), bs2); /* cbpy */\n\n\n    /* THIRD PART:  ALL TO BS3*/\n    /* MB_CodeCoeff(video,bs3); */ /* 5/22/01, replaced with below */\n    for (i = 0; i < 6; i++)\n    {\n        if (CBP&(1 << (5 - i)))\n            (*BlockCodeCoeff)(&(RLB[i]), bs3, 1 - intraDC_decision, ncoefblck[i], Mode);/* Code Intra AC*/\n    }\n\n    return ;\n}\n\n/************************************/\n/* Data Partitioning P-VOP Encoding */\n/************************************/\n\nvoid MBVlcEncodeDataPar_P_VOP(\n    VideoEncData *video,\n    Int ncoefblck[],\n    void *blkCodePtr)\n{\n\n    BitstreamEncVideo *bs1 = video->bitstream1;\n    BitstreamEncVideo *bs2 = video->bitstream2;\n    BitstreamEncVideo *bs3 = video->bitstream3;\n    int i;\n    Int mbnum = video->mbnum;\n    UChar Mode = video->headerInfo.Mode[mbnum];\n    Int QP_tmp = video->QPMB[mbnum];\n    UChar CBP;\n//  MacroBlock *MB=video->outputMB;\n    Int intra, intraDC_decision, DC;\n    Int pmvx, pmvy;\n//  int temp;\n    Int dquant; /* 3/15/01 */\n    RunLevelBlock *RLB = video->RLB;\n    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;\n\n    intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n\n    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/\n\n    if (intra)\n    {\n        if (video->usePrevQP)\n        {\n            QP_tmp = video->QPMB[mbnum-1];\n        }\n\n        DCACPred(video, Mode, &intraDC_decision, QP_tmp);\n    }\n    else\n        intraDC_decision = 0; /* used in RunLevel */\n\n    /* CBP, Run, Level, and Sign */\n    RunLevel(video, intra, intraDC_decision, ncoefblck);\n    CBP = video->headerInfo.CBP[mbnum];\n\n    /* Compute DQuant */\n    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/\n\n    if (dquant && (Mode == MODE_INTRA || Mode == MODE_INTER))\n    {\n        Mode += 2;  /* make it MODE_INTRA_Q and MODE_INTER_Q */\n    }\n\n    if (dquant 
>= 0)\n        dquant = (PV_ABS(dquant) + 1);\n    else\n        dquant = (PV_ABS(dquant) - 1);\n\n    /* FIRST PART: ALL TO BS1 */\n\n    if (CBP == 0 && intra == 0)  /* Determine if Skipped MB */\n    {\n        if ((Mode == MODE_INTER) && (video->mot[mbnum][0].x == 0) && (video->mot[mbnum][0].y == 0))\n            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;\n        else if ((Mode == MODE_INTER4V) && (video->mot[mbnum][1].x == 0) && (video->mot[mbnum][1].y == 0)\n                 && (video->mot[mbnum][2].x == 0) && (video->mot[mbnum][2].y == 0)\n                 && (video->mot[mbnum][3].x == 0) && (video->mot[mbnum][3].y == 0)\n                 && (video->mot[mbnum][4].x == 0) && (video->mot[mbnum][4].y == 0))\n            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;\n    }\n\n\n    if (Mode == MODE_SKIPPED)\n    {\n        BitstreamPut1Bits(bs1, 1); /* not_coded = 1 */\n        return;\n    }\n    else\n        BitstreamPut1Bits(bs1, 0); /* not_coded =0 */\n\n    video->QP_prev = video->QPMB[mbnum];\n    video->usePrevQP = 1;\n\n    PutMCBPC_Inter(CBP, Mode, bs1); /* MCBPC */\n\n    video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n\n    if (Mode == MODE_INTER || Mode == MODE_INTER_Q)\n    {\n        find_pmvs(video, 0, &pmvx, &pmvy); /* Get predicted motion vectors */\n        WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].x - pmvx, bs1); /* Write x to bitstream */\n        WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].y - pmvy, bs1);     /* Write y to bitstream */\n    }\n    else if (Mode == MODE_INTER4V)\n    {\n        for (i = 1; i < 5; i++)\n        {\n            find_pmvs(video, i, &pmvx, &pmvy);\n            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].x - pmvx, bs1);\n            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].y - pmvy, bs1);\n        }\n    }\n    video->header_bits += BitstreamGetPos(bs1); /* Header Bits 
*/\n\n    /* SECOND PART: ALL TO BS2 */\n\n\n    if (intra)\n    {\n        BitstreamPut1Bits(bs2, video->acPredFlag[video->mbnum]);    /* ac_pred_flag */\n        /*temp=*/\n        PutCBPY(CBP >> 2, (Char)(Mode == MODE_INTRA || Mode == MODE_INTRA_Q), bs2); /* cbpy */\n\n        if (Mode == MODE_INTRA_Q)\n            BitstreamPutBits(bs2, 2, dquant);  /* dquant, 3/15/01*/\n\n        if (intraDC_decision == 0)\n        {\n            for (i = 0; i < 6; i++)\n            {\n                DC = video->RLB[i].level[0];\n                if (video->RLB[i].s[0])\n                    DC = -DC;\n                if (i < 4)\n                    /*temp =*/ IntraDC_dpcm(DC, 1, bs2);        /* dct_dc_size_luminance, */\n                else                                    /* dct_dc_differential, and */\n                    /*temp =*/ IntraDC_dpcm(DC, 0, bs2);        /* marker bit */\n            }\n        }\n\n        /****************************/  /* THIRD PART: ALL TO BS3 */\n        for (i = 0; i < 6; i++)\n        {\n            if (CBP&(1 << (5 - i)))\n                (*BlockCodeCoeff)(&(RLB[i]), bs3, 1 - intraDC_decision, ncoefblck[i], Mode);/* Code Intra AC*/\n        }\n    }\n    else\n    {\n        /*temp=*/\n        PutCBPY(CBP >> 2, (Char)(Mode == MODE_INTRA || Mode == MODE_INTRA_Q), bs2); /* cbpy */\n        if (Mode == MODE_INTER_Q)\n            /*  MAY NEED TO CHANGE DQUANT HERE  */\n            BitstreamPutBits(bs2, 2, dquant);  /* dquant, 3/15/01*/\n\n        /****************************/  /* THIRD PART: ALL TO BS3 */\n        for (i = 0; i < 6; i++)\n        {\n            if (CBP&(1 << (5 - i)))\n                (*BlockCodeCoeff)(&(RLB[i]), bs3, 0, ncoefblck[i], Mode);/* Code Intra AC*/\n        }\n    }\n\n    return ;\n}\n#endif /* H263_ONLY */\n/****************************************************************************************/\n/* Short Header/Combined Mode with or without Error Resilience I-VOP and P-VOP Encoding */\n/* 5/21/01, B-VOP is 
not implemented yet!!!!                                            */\n/****************************************************************************************/\n\nvoid MBVlcEncodeCombined_I_VOP(\n    VideoEncData *video,\n    Int ncoefblck[],\n    void *blkCodePtr)\n{\n\n    BitstreamEncVideo *bs1 = video->bitstream1;\n//  BitstreamEncVideo *bs2 = video->bitstream2;\n//  BitstreamEncVideo *bs3 = video->bitstream3;\n    int i;\n    UChar Mode = video->headerInfo.Mode[video->mbnum];\n    UChar CBP = video->headerInfo.CBP[video->mbnum];\n//  MacroBlock *MB=video->outputMB;\n    Int mbnum = video->mbnum;\n    Int intraDC_decision;\n//  int temp;\n    Int dquant; /* 3/15/01 */\n    RunLevelBlock *RLB = video->RLB;\n    Int DC;\n    Int shortVideoHeader = video->vol[video->currLayer]->shortVideoHeader;\n    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;\n\n    /* DC and AC Prediction, 5/28/01, compute CBP, intraDC_decision*/\n\n#ifndef H263_ONLY\n    if (!shortVideoHeader)\n        DCACPred(video, Mode, &intraDC_decision, video->QP_prev);\n    else\n#endif\n    {\n        intraDC_decision = 0;\n    }\n\n    /* CBP, Run, Level, and Sign */\n\n    RunLevel(video, 1, intraDC_decision, ncoefblck);\n    CBP = video->headerInfo.CBP[mbnum];\n\n    /* Compute DQuant */\n    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/\n\n    video->QP_prev = video->QPMB[mbnum];\n\n    if (dquant && Mode == MODE_INTRA)\n    {\n        Mode = MODE_INTRA_Q;\n    }\n\n    if (dquant >= 0)\n        dquant = (PV_ABS(dquant) + 1);\n    else\n        dquant = (PV_ABS(dquant) - 1);\n\n    PutMCBPC_Intra(CBP, Mode, bs1); /* mcbpc I_VOP */\n\n    if (!video->vol[video->currLayer]->shortVideoHeader)\n    {\n        BitstreamPut1Bits(bs1, video->acPredFlag[video->mbnum]);    /* ac_pred_flag */\n    }\n\n    /*temp=*/\n    PutCBPY(CBP >> 2, (Char)(1), bs1); /* cbpy */\n\n    if (Mode == MODE_INTRA_Q)\n        /*  
MAY NEED TO CHANGE DQUANT HERE */\n        BitstreamPutBits(bs1, 2, dquant);  /* dquant, 3/15/01*/\n\n    /*MB_CodeCoeff(video,bs1); 5/21/01, replaced by below */\n    /*******************/\n#ifndef H263_ONLY\n    if (shortVideoHeader) /* Short Header DC coefficients */\n    {\n#endif\n        for (i = 0; i < 6; i++)\n        {\n            DC = RLB[i].level[0];\n            if (RLB[i].s[0])\n                DC = -DC;\n            if (DC != 128)\n                BitstreamPutBits(bs1, 8, DC);   /* intra_dc_size_luminance */\n            else\n                BitstreamPutBits(bs1, 8, 255);          /* intra_dc_size_luminance */\n            if (CBP&(1 << (5 - i)))\n                (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode); /* Code short header Intra AC*/\n        }\n#ifndef H263_ONLY\n    }\n    else if (intraDC_decision == 0)   /* Combined Intra Mode DC and AC coefficients */\n    {\n        for (i = 0; i < 6; i++)\n        {\n            DC = RLB[i].level[0];\n            if (RLB[i].s[0])\n                DC = -DC;\n\n            if (i < 4)\n                /*temp =*/ IntraDC_dpcm(DC, 1, bs1);        /* dct_dc_size_luminance, */\n            else                                                /* dct_dc_differential, and */\n                /*temp =*/ IntraDC_dpcm(DC, 0, bs1);        /* marker bit */\n            if (CBP&(1 << (5 - i)))\n                (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode);/* Code Intra AC */\n        }\n    }\n    else   /* Combined Mode Intra DC/AC coefficients */\n    {\n        for (i = 0; i < 6; i++)\n        {\n            if (CBP&(1 << (5 - i)))\n                (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Intra AC */\n        }\n    }\n#endif\n    /*******************/\n    return ;\n}\n\nvoid MBVlcEncodeCombined_P_VOP(\n    VideoEncData *video,\n    Int ncoefblck[],\n    void *blkCodePtr)\n{\n\n    BitstreamEncVideo *bs1 = video->bitstream1;\n//  BitstreamEncVideo *bs2 = 
video->bitstream2;\n//  BitstreamEncVideo *bs3 = video->bitstream3;\n    int i;\n    Int mbnum = video->mbnum;\n    UChar Mode = video->headerInfo.Mode[mbnum];\n    Int QP_tmp = video->QPMB[mbnum];\n    UChar CBP ;\n//  MacroBlock *MB=video->outputMB;\n    Int intra, intraDC_decision;\n    Int pmvx, pmvy;\n//  int temp;\n    Int dquant; /* 3/15/01 */\n    RunLevelBlock *RLB = video->RLB;\n    Int DC;\n    Int shortVideoHeader = video->vol[video->currLayer]->shortVideoHeader;\n    BlockCodeCoeffPtr BlockCodeCoeff = (BlockCodeCoeffPtr) blkCodePtr;\n\n    intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n\n    /* DC and AC Prediction, 5/28/01, compute intraDC_decision*/\n#ifndef H263_ONLY\n    if (!shortVideoHeader && intra)\n    {\n        if (video->usePrevQP)\n        {\n            QP_tmp = video->QPMB[mbnum-1];\n        }\n        DCACPred(video, Mode, &intraDC_decision, QP_tmp);\n    }\n    else\n#endif\n        intraDC_decision = 0;\n\n    /* CBP, Run, Level, and Sign */\n\n    RunLevel(video, intra, intraDC_decision, ncoefblck);\n    CBP = video->headerInfo.CBP[mbnum];\n\n    /* Compute DQuant */\n    dquant = video->QPMB[mbnum] - video->QP_prev; /* 3/15/01, QP_prev may not equal QPMB[mbnum-1] if mbnum-1 is skipped*/\n    if (dquant && (Mode == MODE_INTRA || Mode == MODE_INTER))\n    {\n        Mode += 2;  /* make it MODE_INTRA_Q and MODE_INTER_Q */\n    }\n\n    if (dquant >= 0)\n        dquant = (PV_ABS(dquant) + 1);\n    else\n        dquant = (PV_ABS(dquant) - 1);\n\n    if (CBP == 0 && intra == 0)  /* Determine if Skipped MB */\n    {\n        if ((Mode == MODE_INTER) && (video->mot[mbnum][0].x == 0) && (video->mot[mbnum][0].y == 0))\n            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;\n        else if ((Mode == MODE_INTER4V) && (video->mot[mbnum][1].x == 0) && (video->mot[mbnum][1].y == 0)\n                 && (video->mot[mbnum][2].x == 0) && (video->mot[mbnum][2].y == 0)\n                 && (video->mot[mbnum][3].x == 0) && 
(video->mot[mbnum][3].y == 0)\n                 && (video->mot[mbnum][4].x == 0) && (video->mot[mbnum][4].y == 0))\n            Mode = video->headerInfo.Mode[video->mbnum] = MODE_SKIPPED;\n    }\n\n    if (Mode == MODE_SKIPPED)\n    {\n        BitstreamPut1Bits(bs1, 1); /* not_coded = 1 */\n        return;\n    }\n    else\n        BitstreamPut1Bits(bs1, 0); /* not_coded =0 */\n\n    video->QP_prev = video->QPMB[mbnum];\n    video->usePrevQP = 1;\n\n    PutMCBPC_Inter(CBP, Mode, bs1); /* mcbpc P_VOP */\n\n    if (!video->vol[video->currLayer]->shortVideoHeader && intra)\n    {\n        BitstreamPut1Bits(bs1, video->acPredFlag[video->mbnum]);    /* ac_pred_flag */\n    }\n\n    /*temp=*/\n    PutCBPY(CBP >> 2, (Char)(intra), bs1); /* cbpy */\n\n    if (Mode == MODE_INTRA_Q || Mode == MODE_INTER_Q)\n        /*  MAY NEED TO CHANGE DQUANT HERE  */\n        BitstreamPutBits(bs1, 2, dquant);  /* dquant, 3/15/01*/\n\n    video->header_bits -= BitstreamGetPos(bs1); /* Header Bits */\n\n    if (!((video->vol[video->currLayer]->scalability) && (video->currVop->refSelectCode == 3)))\n    {\n        if (Mode == MODE_INTER || Mode == MODE_INTER_Q)\n        {\n            find_pmvs(video, 0, &pmvx, &pmvy); /* Get predicted motion vectors */\n            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].x - pmvx, bs1); /* Write x to bitstream */\n            WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][0].y - pmvy, bs1);     /* Write y to bitstream */\n        }\n        else if (Mode == MODE_INTER4V)\n        {\n            for (i = 1; i < 5; i++)\n            {\n                find_pmvs(video, i, &pmvx, &pmvy);\n                WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].x - pmvx, bs1);\n                WriteMVcomponent(video->currVop->fcodeForward, video->mot[mbnum][i].y - pmvy, bs1);\n            }\n        }\n    }\n    video->header_bits += BitstreamGetPos(bs1); /* Header Bits */\n\n    /* 
MB_CodeCoeff(video,bs1); */ /* 5/22/01, replaced with below */\n    /****************************/\n    if (intra)\n    {\n#ifndef H263_ONLY\n        if (shortVideoHeader) /* Short Header DC coefficients */\n        {\n#endif\n            for (i = 0; i < 6; i++)\n            {\n                DC = RLB[i].level[0];\n                if (RLB[i].s[0])\n                    DC = -DC;\n                if (DC != 128)\n                    BitstreamPutBits(bs1, 8, DC);   /* intra_dc_size_luminance */\n                else\n                    BitstreamPutBits(bs1, 8, 255);          /* intra_dc_size_luminance */\n                if (CBP&(1 << (5 - i)))\n                    (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode); /* Code short header Intra AC*/\n            }\n#ifndef H263_ONLY\n        }\n        else if (intraDC_decision == 0)   /* Combined Intra Mode DC and AC coefficients */\n        {\n            for (i = 0; i < 6; i++)\n            {\n                DC = RLB[i].level[0];\n                if (RLB[i].s[0])\n                    DC = -DC;\n\n                if (i < 4)\n                    /*temp =*/ IntraDC_dpcm(DC, 1, bs1);        /* dct_dc_size_luminance, */\n                else                                                /* dct_dc_differential, and */\n                    /*temp =*/ IntraDC_dpcm(DC, 0, bs1);        /* marker bit */\n                if (CBP&(1 << (5 - i)))\n                    (*BlockCodeCoeff)(&(RLB[i]), bs1, 1, ncoefblck[i], Mode);/* Code Intra AC */\n            }\n        }\n        else   /* Combined Mode Intra DC/AC coefficients */\n        {\n            for (i = 0; i < 6; i++)\n            {\n                if (CBP&(1 << (5 - i)))\n                    (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Intra AC */\n            }\n        }\n#endif\n    }\n    else   /* Shortheader or Combined INTER Mode AC coefficients */\n    {\n        for (i = 0; i < 6; i++)\n        {\n            if (CBP&(1 << (5 - i)))\n 
               (*BlockCodeCoeff)(&(RLB[i]), bs1, 0, ncoefblck[i], Mode);/* Code Inter AC*/\n        }\n    }\n    /****************************/\n\n    return ;\n}\n\n/* ======================================================================== */\n/*  Function : BlockCodeCoeff()                                         */\n/*  Date     : 09/18/2000                                                   */\n/*  Purpose  : VLC Encode  AC/DC coeffs                                     */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :  5/16/01  grouping BitstreamPutBits calls                    */\n/*              5/22/01  break up function                              */\n/* ======================================================================== */\n#ifndef NO_RVLC\n/*****************/\n/* RVLC ENCODING */\n/*****************/\nVoid BlockCodeCoeff_RVLC(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode)\n{\n    int length = 0;\n    int i;\n    Int level;\n    Int run;\n    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n\n    /* Not Last Coefficient */\n    for (i = j_start; i < j_stop - 1; i++)\n    {\n        run = RLB->run[i];\n        level = RLB->level[i];\n        //if(i==63||RLB->run[i+1] == -1)    /* Don't Code Last Coefficient Here */\n        //  break;\n        /*ENCODE RUN LENGTH */\n        if (level < 28 && run < 39)\n        {\n            if (intra)\n                length = PutCoeff_Intra_RVLC(run, level, bs);\n            else\n                length = PutCoeff_Inter_RVLC(run, level, bs);\n        }\n        else\n            length = 0;\n        /* ESCAPE CODING */\n        if (length == 0)\n        {\n            BitstreamPutBits(bs, 5 + 1, 2); /* ESCAPE + Not Last Coefficient */\n            //BitstreamPutBits(bs,1,0); /* Not Last Coefficient */\n            BitstreamPutBits(bs, 6 + 1, 
(run << 1) | 1); /* RUN + MARKER BIT*/\n            //BitstreamPutBits(bs,1,1);  /* MARKER BIT */\n            BitstreamPutGT8Bits(bs, 11, level); /* LEVEL */\n            BitstreamPutBits(bs, 1 + 4, 16); /* MARKER BIT */\n            //BitstreamPutBits(bs,4,0);  /* RVLC TRAILING ESCAPE */\n        }\n        BitstreamPutBits(bs, 1, RLB->s[i]); /* SIGN BIT */\n    }\n    /* Last Coefficient!!! */\n    run = RLB->run[i];\n    level = RLB->level[i];\n\n    /*ENCODE RUN LENGTH */\n    if (level < 6 && run < 45)\n    {\n        if (intra)\n            length = PutCoeff_Intra_RVLC_Last(run, level, bs);\n        else\n            length = PutCoeff_Inter_RVLC_Last(run, level, bs);\n    }\n    else\n        length = 0;\n    /* ESCAPE CODING */\n    if (length == 0)\n    {\n        BitstreamPutBits(bs, 5 + 1, 3); /* ESCAPE CODE + Last Coefficient*/\n        //BitstreamPutBits(bs,1,1); /* Last Coefficient !*/\n        BitstreamPutBits(bs, 6 + 1, (run << 1) | 1); /* RUN + MARKER BIT*/\n        //BitstreamPutBits(bs,1,1);  /* MARKER BIT */\n        BitstreamPutGT8Bits(bs, 11, level); /* LEVEL */\n        BitstreamPutBits(bs, 1 + 4, 16); /* MARKER BIT + RVLC TRAILING ESCAPE */\n        //BitstreamPutBits(bs,4,0);  /* */\n    }\n    BitstreamPut1Bits(bs, RLB->s[i]); /* SIGN BIT */\n\n    return ;\n}\n#endif\n/*******************************/\n/* SHORT VIDEO HEADER ENCODING */\n/*******************************/\n\nVoid BlockCodeCoeff_ShortHeader(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode)\n{\n    int length = 0;\n    int i;\n//  int temp;\n    Int level;\n    Int run;\n\n    OSCL_UNUSED_ARG(Mode);\n\n    /* Not Last Coefficient */\n    for (i = j_start; i < j_stop - 1; i++)\n    {\n        run = RLB->run[i];\n        level = RLB->level[i];\n//      if(i==63 ||RLB->run[i+1] == -1) /* Don't Code Last Coefficient Here */\n//          break;\n        /*ENCODE RUN LENGTH */\n        if (level < 13)\n        {\n            length = 
PutCoeff_Inter(run, level, bs);\n            if (length != 0)\n                /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); /* Sign Bit */\n        }\n        else\n            length = 0;\n        /* ESCAPE CODING */\n        if (length == 0)\n        {\n            if (RLB->s[i])\n                level = -level;\n            BitstreamPutBits(bs, 7 + 1, 6); /* ESCAPE CODE + Not Last Coefficient */\n            //BitstreamPutBits(bs,1,0); /* Not Last Coefficient */\n            BitstreamPutBits(bs, 6, run); /* RUN */\n            BitstreamPutBits(bs, 8, level&0xFF); /* LEVEL, mask to make sure length 8 */\n        }\n    }\n    /* Last Coefficient!!! */\n    run = RLB->run[i];\n    level = RLB->level[i];\n\n    /*ENCODE RUN LENGTH */\n    if (level < 13)\n    {\n        length = PutCoeff_Inter_Last(run, level, bs);\n        if (length != 0)\n            /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); /* Sign Bit */\n    }\n    else\n        length = 0;\n    /* ESCAPE CODING */\n    if (length == 0)\n    {\n        if (RLB->s[i])\n            level = -level;\n        BitstreamPutBits(bs, 7 + 1, 7); /* ESCAPE CODE + Last Coefficient */\n        //BitstreamPutBits(bs,1,1); /* Last Coefficient !!!*/\n        BitstreamPutBits(bs, 6, run); /* RUN */\n        BitstreamPutBits(bs, 8, level&0xFF); /* LEVEL, mask to make sure length 8  */\n    }\n\n    return ;\n\n}\n\n#ifndef H263_ONLY\n/****************/\n/* VLC ENCODING */\n/****************/\nVoid BlockCodeCoeff_Normal(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, Int j_stop, UChar Mode)\n{\n    int length = 0;\n    int i;\n    //int temp;\n    Int level;\n    Int run;\n    Int intra = (Mode == MODE_INTRA || Mode == MODE_INTRA_Q);\n    Int level_minus_max;\n    Int run_minus_max;\n    Int(*PutCoeff)(Int, Int, BitstreamEncVideo *); /* pointer to functions, 5/28/01 */\n\n    /* Not Last Coefficient!!! 
*/\n\n    if (intra)\n        PutCoeff = &PutCoeff_Intra;\n    else\n        PutCoeff = &PutCoeff_Inter;\n\n    for (i = j_start; i < j_stop - 1; i++)\n    {\n        run = RLB->run[i];\n        level = RLB->level[i];\n\n        /* Encode Run Length */\n        if (level < 28)\n        {\n            length = (*PutCoeff)(run, level, bs); /* 5/28/01 replaces above */\n        }\n        else\n        {\n            length = 0;\n        }\n\n        /* First escape mode: LEVEL OFFSET */\n        if (length == 0)\n        {\n            if (intra)\n            {\n                level_minus_max = level - intra_max_level[0][run];\n                if (level_minus_max < 28)\n                    length = PutLevelCoeff_Intra(run, level_minus_max, bs);\n                else\n                    length = 0;\n            }\n            else\n            {\n                level_minus_max = level - inter_max_level[0][run];\n                if (level_minus_max < 13)\n                    length = PutLevelCoeff_Inter(run, level_minus_max, bs);\n                else\n                    length = 0;\n            }\n\n            /* Second escape mode: RUN OFFSET */\n            if (length == 0)\n            {\n                if (level < 28)\n                {\n                    if (intra)\n                    {\n                        run_minus_max = run - (intra_max_run0[level] + 1);\n                        length = PutRunCoeff_Intra(run_minus_max, level, bs);\n                    }\n                    else if (level < 13)\n                    {\n                        run_minus_max = run - (inter_max_run0[level] + 1);\n                        length = PutRunCoeff_Inter(run_minus_max, level, bs);\n                    }\n                    else\n                    {\n                        length = 0;\n                    }\n                }\n                else\n                {\n                    length = 0;\n                }\n\n                /* Third escape 
mode: FIXED LENGTH CODE */\n                if (length == 0)\n                {\n                    if (RLB->s[i])\n                        level = -level;\n                    /*temp =*/\n                    BitstreamPutBits(bs, 7 + 2 + 1, 30); /* ESCAPE CODE + Followed by 11 + Not Last Coefficient*/\n                    //temp = BitstreamPutBits(bs,2,3); /* Followed by 11 */\n                    //temp = BitstreamPutBits(bs, 1, 0); /* Not Last Coefficient*/\n                    /*temp =*/\n                    BitstreamPutBits(bs, 6 + 1, (run << 1) | 1); /* Encode Run + Marker Bit */\n                    //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */\n                    /*temp =*/\n                    BitstreamPutGT8Bits(bs, 12 + 1, ((level << 1) | 1)&0x1FFF); /* Encode Level, mask to make sure length 12  */\n                    //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */\n                }\n            }\n        }\n\n        /* Encode Sign Bit */\n        if (length != 0)\n            /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]); /* Sign Bit */\n\n    }\n    /* Last Coefficient */\n    run = RLB->run[i];\n    level = RLB->level[i];\n\n    /* Encode Run Length */\n    if (level < 9)\n    {\n        if (intra)\n        {\n            length = PutCoeff_Intra_Last(run, level, bs);\n        }\n        else if (level < 4)\n        {\n            length = PutCoeff_Inter_Last(run, level, bs);\n        }\n        else\n        {\n            length = 0;\n        }\n    }\n    else\n    {\n        length = 0;\n    }\n\n    /* First escape mode: LEVEL OFFSET */\n    if (length == 0)\n    {\n        if (intra)\n        {\n            level_minus_max = level - intra_max_level[1][run];\n            if (level_minus_max < 9)\n                length = PutLevelCoeff_Intra_Last(run, level_minus_max, bs);\n            else\n                length = 0;\n        }\n        else\n        {\n            level_minus_max = level - inter_max_level[1][run];\n            if 
(level_minus_max < 4)\n                length = PutLevelCoeff_Inter_Last(run, level_minus_max, bs);\n            else\n                length = 0;\n        }\n        /* Second escape mode: RUN OFFSET */\n        if (length == 0)\n        {\n            if (level < 9)\n            {\n                if (intra)\n                {\n                    run_minus_max = run - (intra_max_run1[level] + 1);\n                    length = PutRunCoeff_Intra_Last(run_minus_max, level, bs);\n                }\n                else if (level < 4)\n                {\n                    run_minus_max = run - (inter_max_run1[level] + 1);\n                    length = PutRunCoeff_Inter_Last(run_minus_max, level, bs);\n                }\n                else\n                {\n                    length = 0;\n                }\n            }\n            else\n            {\n                length = 0;\n            }\n            /* Third escape mode: FIXED LENGTH CODE */\n            if (length == 0)\n            {\n                if (RLB->s[i])\n                    level = -level;\n                /*temp =*/\n                BitstreamPutGT8Bits(bs, 7 + 2 + 1, 31); /* ESCAPE CODE + Followed by 11 + Last Coefficient*/\n                //temp = BitstreamPutBits(bs,2,3); /* Followed by 11 */\n                //temp = BitstreamPutBits(bs, 1, 1); /* Last Coefficient!!!*/\n                /*temp =*/\n                BitstreamPutBits(bs, 6 + 1, (run << 1) | 1); /* Encode Run + Marker Bit */\n                //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */\n                /*temp =*/\n                BitstreamPutGT8Bits(bs, 12 + 1, ((level << 1) | 1)&0x1FFF); /* Encode Level, mask to make sure length 8 */\n                //temp = BitstreamPutBits(bs,1,1); /* Marker Bit */\n            }\n        }\n    }\n\n    /* Encode Sign Bit */\n    if (length != 0)\n        /*temp =*/ BitstreamPut1Bits(bs, RLB->s[i]);\n\n\n    return ;\n}\n\n#endif /* H263_ONLY */\n/* 
======================================================================== */\n/*  Function : RUNLevel                                                     */\n/*  Date     : 09/20/2000                                                   */\n/*  Purpose  : Get the Coded Block Pattern for each block                   */\n/*  In/out   :                                                              */\n/*      Int* qcoeff     Quantized DCT coefficients\n        Int Mode        Coding Mode\n        Int ncoeffs     Number of coefficients                              */\n/*  Return   :                                                              */\n/*      Int CBP         Coded Block Pattern                                 */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nvoid RunLevel(VideoEncData *video, Int intra, Int intraDC_decision, Int ncoefblck[])\n{\n    Int i, j;\n    Int CBP = video->headerInfo.CBP[video->mbnum];\n    Int ShortNacNintra = (!(video->vol[video->currLayer]->shortVideoHeader) && video->acPredFlag[video->mbnum] && intra);\n    MacroBlock *MB = video->outputMB;\n    Short *dataBlock;\n    Int level;\n    RunLevelBlock *RLB;\n    Int run, idx;\n    Int *zz, nc, zzorder;\n    UChar imask[6] = {0x1F, 0x2F, 0x37, 0x3B, 0x3D, 0x3E};\n    UInt *bitmapzz;\n\n    /* Set Run, Level and CBP for this Macroblock */\n    /* ZZ scan is done here.  
*/\n\n    if (intra)\n    {\n\n        if (intraDC_decision != 0)\n            intra = 0;              /* DC/AC in Run/Level */\n\n        for (i = 0; i < 6 ; i++)\n        {\n\n            zz = (Int *) zigzag_inv;\n\n            RLB = video->RLB + i;\n\n            dataBlock = MB->block[i];\n\n            if (intra)\n            {\n                RLB->run[0] = 0;\n                level = dataBlock[0];\n                dataBlock[0] = 0; /* reset to zero */\n                if (level < 0)\n                {\n                    RLB->level[0] = -level;\n                    RLB->s[0] = 1;\n                }\n                else\n                {\n                    RLB->level[0] = level;\n                    RLB->s[0] = 0;\n                }\n            }\n\n            idx = intra;\n\n            if ((CBP >> (5 - i)) & 1)\n            {\n                if (ShortNacNintra)\n                {\n                    switch ((video->zz_direction >> (5 - i))&1)\n                    {\n                        case 0:\n                            zz = (Int *)zigzag_v_inv;\n                            break;\n                        case 1:\n                            zz = (Int *)zigzag_h_inv;\n                            break;\n                    }\n                }\n                run = 0;\n                nc = ncoefblck[i];\n                for (j = intra, zz += intra; j < nc; j++, zz++)\n                {\n                    zzorder = *zz;\n                    level = dataBlock[zzorder];\n                    if (level == 0)\n                        run++;\n                    else\n                    {\n                        dataBlock[zzorder] = 0; /* reset output */\n                        if (level < 0)\n                        {\n                            RLB->level[idx] = -level;\n                            RLB->s[idx] = 1;\n                            RLB->run[idx] = run;\n                            run = 0;\n                            idx++;\n    
                    }\n                        else\n                        {\n                            RLB->level[idx] = level;\n                            RLB->s[idx] = 0;\n                            RLB->run[idx] = run;\n                            run = 0;\n                            idx++;\n                        }\n                    }\n                }\n            }\n\n            ncoefblck[i] = idx; /* 5/22/01, reuse ncoefblck */\n\n            if (idx == intra) /* reset CBP, nothing to be coded */\n                CBP &= imask[i];\n        }\n\n        video->headerInfo.CBP[video->mbnum] = CBP;\n\n        return ;\n    }\n    else\n    {\n//      zz = (Int *) zigzag_inv;  no need to use it, default\n\n        if (CBP)\n        {\n            for (i = 0; i < 6 ; i++)\n            {\n                RLB = video->RLB + i;\n                idx = 0;\n\n                if ((CBP >> (5 - i)) & 1)\n                {   /* 7/30/01 */\n                    /* Use bitmapzz to find the Run,Level,Sign symbols */\n                    bitmapzz = video->bitmapzz[i];\n                    dataBlock = MB->block[i];\n                    nc  = ncoefblck[i];\n\n                    idx = zero_run_search(bitmapzz, dataBlock, RLB, nc);\n                }\n                ncoefblck[i] = idx; /* 5/22/01, reuse ncoefblck */\n                if (idx == 0) /* reset CBP, nothing to be coded */\n                    CBP &= imask[i];\n            }\n            video->headerInfo.CBP[video->mbnum] = CBP;\n        }\n        return ;\n    }\n}\n\n#ifndef H263_ONLY\n#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n    static Bool IntraDCSwitch_Decision(Int Mode, Int intra_dc_vlc_thr, Int intraDCVlcQP)\n    {\n        Bool switched = FALSE;\n\n        if (Mode == MODE_INTRA || Mode == MODE_INTRA_Q)\n        {\n            if (intra_dc_vlc_thr != 0)\n            {\n                switched = (intra_dc_vlc_thr == 7 || intraDCVlcQP >= intra_dc_vlc_thr * 2 + 11);\n            }\n        }\n\n  
      return switched;\n    }\n#ifdef __cplusplus\n}\n#endif\n\nInt IntraDC_dpcm(Int val, Int lum, BitstreamEncVideo *bitstream)\n{\n    Int n_bits;\n    Int absval, size = 0;\n\n    absval = (val < 0) ? -val : val;    /* abs(val) */\n\n\n    /* compute dct_dc_size */\n\n    size = 0;\n    while (absval)\n    {\n        absval >>= 1;\n        size++;\n    }\n\n    if (lum)\n    {   /* luminance */\n        n_bits = PutDCsize_lum(size, bitstream);\n    }\n    else\n    {   /* chrominance */\n        n_bits = PutDCsize_chrom(size, bitstream);\n    }\n\n    if (size != 0)\n    {\n        if (val >= 0)\n        {\n            ;\n        }\n        else\n        {\n            absval = -val; /* set to \"-val\" MW 14-NOV-1996 */\n            val = absval ^((1 << size) - 1);\n        }\n        BitstreamPutBits(bitstream, (size), (UInt)(val));\n        n_bits += size;\n\n        if (size > 8)\n            BitstreamPut1Bits(bitstream, 1);\n    }\n\n    return n_bits;  /* # bits for intra_dc dpcm */\n\n}\n\n/* ======================================================================== */\n/*  Function : DC_AC_PRED                                                   */\n/*  Date     : 09/24/2000                                                   */\n/*  Purpose  : DC and AC encoding of Intra Blocks                           */\n/*  In/out   :                                                              */\n/*      VideoEncData    *video\n        UChar           Mode                                                */\n/*  Return   :                                                              */\n/*                                                                          */\n/* ======================================================================== */\nInt cal_dc_scalerENC(Int QP, Int type) ;\n\n\n#define PREDICT_AC  for (m = 0; m < 7; m++){ \\\n                        tmp = DCAC[0]*QPtmp;\\\n                        if(tmp<0)   tmp = (tmp-(QP/2))/QP;\\\n                        else 
       tmp = (tmp+(QP/2))/QP;\\\n                        pred[m] = tmp;\\\n                        DCAC++;\\\n                    }\n\n\nVoid DCACPred(VideoEncData *video, UChar Mode, Int *intraDC_decision, Int intraDCVlcQP)\n{\n    MacroBlock *MB = video->outputMB;\n    Int mbnum = video->mbnum;\n    typeDCStore *DC_store = video->predDC + mbnum;\n    typeDCACStore *DCAC_row = video->predDCAC_row;\n    typeDCACStore *DCAC_col = video->predDCAC_col;\n    Short   *DCAC;\n    UChar Mode_top, Mode_left;\n\n    Vol *currVol = video->vol[video->currLayer];\n    Int nMBPerRow = currVol->nMBPerRow;\n    Int x_pos = video->outputMB->mb_x; /* 5/28/01 */\n    Int y_pos = video->outputMB->mb_y;\n    UChar QP = video->QPMB[mbnum];\n    UChar *QPMB = video->QPMB;\n    UChar *slice_nb = video->sliceNo;\n    Bool bACPredEnable = video->encParams->ACDCPrediction;\n    Int *ACpred_flag = video->acPredFlag;\n    Int mid_grey = 128 << 3;\n    Int m;\n    Int comp;\n    Int dc_scale = 8, tmp;\n\n    static const Int Xpos[6] = { -1, 0, -1, 0, -1, -1};\n    static const Int Ypos[6] = { -1, -1, 0, 0, -1, -1};\n    static const Int Xtab[6] = {1, 0, 3, 2, 4, 5};\n    static const Int Ytab[6] = {2, 3, 0, 1, 4, 5};\n    static const Int Ztab[6] = {3, 2, 1, 0, 4, 5};\n\n    /* I added these to speed up comparisons */\n    static const Int Pos0[6] = { 1, 1, 0, 0, 1, 1};\n    static const Int Pos1[6] = { 1, 0, 1, 0, 1, 1};\n    static const Int B_Xtab[6] = {0, 1, 0, 1, 2, 3};\n    static const Int B_Ytab[6] = {0, 0, 1, 1, 2, 3};\n\n    Int direction[6];       /* 0: HORIZONTAL, 1: VERTICAL */\n    Int block_A, block_B, block_C;\n    Int grad_hor, grad_ver, DC_pred;\n    Short pred[7], *predptr;\n    Short pcoeff[42];\n    Short *qcoeff;\n    Int S = 0, S1, S2;\n    Int diff, QPtmp;\n    Int newCBP[6];\n    UChar mask1[6] = {0x20, 0x10, 0x8, 0x4, 0x2, 0x1};\n//  UChar mask2[6] = {0x1f,0x2f,0x37,0x3b,0x3d,0x3e};\n\n    Int y_offset, x_offset, x_tab, y_tab, z_tab;    /* speedup coefficients */\n    
Int b_xtab, b_ytab;\n\n    video->zz_direction = 0;\n\n    /* Standard MPEG-4 Headers do DC/AC prediction*/\n    /* check whether neighbors are INTER */\n    if (y_pos > 0)\n    {\n        Mode_top = video->headerInfo.Mode[mbnum-nMBPerRow];\n        if (!(Mode_top == MODE_INTRA || Mode_top == MODE_INTRA_Q))\n        {\n            DCAC = DC_store[-nMBPerRow];\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            /* set to 0 DCAC_row[x_pos][0..3] */\n            if (bACPredEnable == TRUE)\n            {\n                M4VENC_MEMSET(DCAC_row[x_pos][0], 0, sizeof(Short) << 5);\n            }\n        }\n    }\n    if (x_pos > 0)\n    {\n        Mode_left = video->headerInfo.Mode[mbnum-1];\n        if (!(Mode_left == MODE_INTRA || Mode_left == MODE_INTRA_Q))\n        {\n            DCAC = DC_store[-1];\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            *DCAC++ = mid_grey;\n            /* set to 0 DCAC_col[x_pos][0..3] */\n            if (bACPredEnable == TRUE)\n            {\n                M4VENC_MEMSET(DCAC_col[0][0], 0, sizeof(Short) << 5);\n            }\n        }\n    }\n\n    S1 = 0;\n    S2 = 0;\n\n    for (comp = 0; comp < 6; comp++)\n    {\n\n        if (Ypos[comp] != 0)        y_offset = -nMBPerRow;\n        else                    y_offset = 0;\n        x_offset = Xpos[comp];\n        x_tab = Xtab[comp];\n        y_tab = Ytab[comp];\n        z_tab = Ztab[comp];\n\n        b_xtab = B_Xtab[comp];\n        b_ytab = B_Ytab[comp];\n\n        qcoeff = MB->block[comp];\n\n        /****************************/\n        /*  Store DC coefficients */\n        /****************************/\n        /* Store coeff values for Intra MB */\n        if (comp == 0) dc_scale = 
cal_dc_scalerENC(QP, 1) ;\n        if (comp == 4) dc_scale = cal_dc_scalerENC(QP, 2) ;\n\n        QPtmp = qcoeff[0] * dc_scale; /* DC value */\n\n        if (QPtmp > 2047)   /* 10/10/01, add clipping (bug fixed) */\n            DC_store[0][comp] = 2047;\n        else if (QPtmp < -2048)\n            DC_store[0][comp] = -2048;\n        else\n            DC_store[0][comp] = QPtmp;\n\n        /**************************************************************/\n        /* Find the direction of the prediction and the DC prediction */\n        /**************************************************************/\n\n        if ((x_pos == 0) && y_pos == 0)\n        {   /* top left corner */\n            block_A = (comp == 1 || comp == 3) ? DC_store[0][x_tab] : mid_grey;\n            block_B = (comp == 3) ? DC_store[x_offset][z_tab] : mid_grey;\n            block_C = (comp == 2 || comp == 3) ? DC_store[0][y_tab] : mid_grey;\n        }\n        else if (x_pos == 0)\n        {   /* left edge */\n            block_A = (comp == 1 || comp == 3) ? 
DC_store[0][x_tab] : mid_grey;\n            block_B = ((comp == 1 && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) || comp == 3) ?\n                      DC_store[y_offset+x_offset][z_tab] : mid_grey;\n            block_C = (comp == 2 || comp == 3 ||\n                       (Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))) ?\n                      DC_store[y_offset][y_tab] : mid_grey;\n        }\n        else if (y_pos == 0)\n        { /* top row */\n            block_A = (comp == 1 || comp == 3 || (Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))) ?\n                      DC_store[x_offset][x_tab] : mid_grey;\n            block_B = ((comp == 2 && (slice_nb[mbnum] == slice_nb[mbnum-1])) || comp == 3) ?\n                      DC_store[y_offset + x_offset][z_tab] : mid_grey;\n            block_C = (comp == 2 || comp == 3) ?\n                      DC_store[y_offset][y_tab] : mid_grey;\n        }\n        else\n        {\n            block_A = (comp == 1 || comp == 3 || (Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))) ?\n                      DC_store[x_offset][x_tab] : mid_grey;\n            block_B = (((comp == 0 || comp == 4 || comp == 5) &&\n                        (slice_nb[mbnum] == slice_nb[mbnum-1-nMBPerRow])) ||\n                       (comp == 1 && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])) ||\n                       (comp == 2 && (slice_nb[mbnum] == slice_nb[mbnum-1])) || (comp == 3)) ?\n                      (DC_store[y_offset + x_offset][z_tab]) : mid_grey;\n            block_C = (comp == 2 || comp == 3 || (Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))) ?\n                      DC_store[y_offset][y_tab] : mid_grey;\n        }\n        grad_hor = block_B - block_C;\n        grad_ver = block_A - block_B;\n\n        if ((PV_ABS(grad_ver)) < (PV_ABS(grad_hor)))\n        {\n            DC_pred = block_C;\n            direction[comp] = 1;\n            video->zz_direction = (video->zz_direction) | mask1[comp];\n\n  
      }\n        else\n        {\n            DC_pred = block_A;\n            direction[comp] = 0;\n            //video->zz_direction=video->zz_direction<<1;\n        }\n\n        /* DC prediction */\n        QPtmp = dc_scale; /* 5/28/01 */\n        qcoeff[0] -= (DC_pred + QPtmp / 2) / QPtmp;\n\n\n        if (bACPredEnable)\n        {\n            /***********************/\n            /* Find AC prediction  */\n            /***********************/\n\n            if ((x_pos == 0) && y_pos == 0)     /* top left corner */\n            {\n                if (direction[comp] == 0)\n                {\n                    if (comp == 1 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+x_offset];\n                        DCAC = DCAC_col[0][b_ytab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n                else\n                {\n                    if (comp == 2 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+ y_offset];\n                        DCAC = DCAC_row[x_pos][b_xtab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n   
                     pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n            }\n            else if (x_pos == 0)    /* left edge */\n            {\n                if (direction[comp] == 0)\n                {\n                    if (comp == 1 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+x_offset];\n                        DCAC = DCAC_col[0][b_ytab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n                else\n                {\n\n                    if ((Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))\n                            || comp == 2 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+y_offset];\n                        DCAC = DCAC_row[x_pos][b_xtab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n            }\n            else if (y_pos == 0)  /* top row */\n            {\n           
     if (direction[comp] == 0)\n                {\n                    if ((Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))\n                            || comp == 1 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+x_offset];\n                        DCAC = DCAC_col[0][b_ytab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n                else\n                {\n                    if (comp == 2 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+y_offset];\n                        DCAC = DCAC_row[x_pos][b_xtab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n            }\n            else\n            {\n                if (direction[comp] == 0)\n                {\n                    if ((Pos1[comp] && (slice_nb[mbnum] == slice_nb[mbnum-1]))\n                            || comp == 1 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+x_offset];\n       
                 DCAC = DCAC_col[0][b_ytab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n                else\n                {\n                    if ((Pos0[comp] && (slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow]))\n                            || comp  == 2 || comp == 3)\n                    {\n                        QPtmp = QPMB[mbnum+y_offset];\n                        DCAC = DCAC_row[x_pos][b_xtab];\n                        if (QPtmp != QP)\n                        {\n                            predptr = pred;\n                            PREDICT_AC\n                        }\n                        else\n                        {\n                            predptr = DCAC;\n                        }\n                    }\n                    else\n                    {\n                        predptr = pred;\n                        pred[0] = pred[1] = pred[2] = pred[3] = pred[4] = pred[5] = pred[6] = 0;\n                    }\n                }\n            }\n\n            /************************************/\n            /* Decide and Perform AC prediction */\n            /************************************/\n            newCBP[comp] = 0;\n\n            if (direction[comp] == 0)   /* Horizontal, left COLUMN of block A */\n            {\n                DCAC = pcoeff + comp * 7; /* re-use DCAC as local var */\n                qcoeff += 8;\n                for (m = 0; m < 7; m++)\n                {\n                    QPtmp = 
qcoeff[m<<3];\n                    if (QPtmp > 0)  S1 += QPtmp;\n                    else        S1 -= QPtmp;\n                    QPtmp -= predptr[m];\n                    DCAC[m] = QPtmp; /* save prediction residue to pcoeff*/\n                    if (QPtmp)  newCBP[comp] = 1;\n                    diff = PV_ABS(QPtmp);\n                    S2 += diff;\n                }\n            }\n            else            /* Vertical, top ROW of block C */\n            {\n                qcoeff++;\n                DCAC = pcoeff + comp * 7; /* re-use DCAC as local var */\n                for (m = 0; m < 7; m++)\n                {\n                    QPtmp = qcoeff[m];\n                    if (QPtmp > 0)  S1 += QPtmp;\n                    else        S1 -= QPtmp;\n                    QPtmp -= predptr[m];\n                    DCAC[m] = QPtmp; /* save prediction residue to pcoeff*/\n                    if (QPtmp)  newCBP[comp] = 1;\n                    diff = PV_ABS(QPtmp);\n                    S2 += diff;\n                }\n            }\n\n            /****************************/\n            /*  Store DCAC coefficients */\n            /****************************/\n            /* Store coeff values for Intra MB */\n            qcoeff = MB->block[comp];\n            DCAC = DCAC_row[x_pos][b_xtab];\n            DCAC[0] = qcoeff[1];\n            DCAC[1] = qcoeff[2];\n            DCAC[2] = qcoeff[3];\n            DCAC[3] = qcoeff[4];\n            DCAC[4] = qcoeff[5];\n            DCAC[5] = qcoeff[6];\n            DCAC[6] = qcoeff[7];\n\n            DCAC = DCAC_col[0][b_ytab];\n            DCAC[0] = qcoeff[8];\n            DCAC[1] = qcoeff[16];\n            DCAC[2] = qcoeff[24];\n            DCAC[3] = qcoeff[32];\n            DCAC[4] = qcoeff[40];\n            DCAC[5] = qcoeff[48];\n            DCAC[6] = qcoeff[56];\n\n\n        } /* bACPredEnable */\n\n    } /* END COMP FOR LOOP */\n\n    //if (diff > 2047)\n    //    break;\n    S += (S1 - S2);\n\n\n    if (S >= 0 && 
bACPredEnable == TRUE)\n    {\n        ACpred_flag[mbnum] = 1;\n        DCAC = pcoeff; /* prediction residue */\n        qcoeff = MB->block[0];\n\n        for (comp = 0; comp < 6; comp++)\n        {\n            if (direction[comp] == 0)\n            {\n                qcoeff[8] = DCAC[0];\n                qcoeff[16] = DCAC[1];\n                qcoeff[24] = DCAC[2];\n                qcoeff[32] = DCAC[3];\n                qcoeff[40] = DCAC[4];\n                qcoeff[48] = DCAC[5];\n                qcoeff[56] = DCAC[6];\n\n            }\n            else\n            {\n                qcoeff[1] = DCAC[0];\n                qcoeff[2] = DCAC[1];\n                qcoeff[3] = DCAC[2];\n                qcoeff[4] = DCAC[3];\n                qcoeff[5] = DCAC[4];\n                qcoeff[6] = DCAC[5];\n                qcoeff[7] = DCAC[6];\n            }\n            if (newCBP[comp]) /* 5/28/01, update CBP */\n                video->headerInfo.CBP[mbnum] |= mask1[comp];\n            DCAC += 7;\n            qcoeff += 64;\n        }\n    }\n    else  /* Only DC Prediction */\n    {\n        ACpred_flag[mbnum] = 0;\n    }\n\n    *intraDC_decision = IntraDCSwitch_Decision(Mode, video->currVop->intraDCVlcThr, intraDCVlcQP);\n    if (*intraDC_decision) /* code DC with AC , 5/28/01*/\n    {\n        qcoeff = MB->block[0];\n        for (comp = 0; comp < 6; comp++)\n        {\n            if (*qcoeff)\n                video->headerInfo.CBP[mbnum] |= mask1[comp];\n            qcoeff += 64;\n        }\n    }\n    return;\n}\n#endif /* H263_ONLY */\n\n\n\nVoid find_pmvs(VideoEncData *video, Int block, Int *mvx, Int *mvy)\n{\n    Vol *currVol = video->vol[video->currLayer];\n//  UChar *Mode = video->headerInfo.Mode; /* modes for MBs */\n    UChar *slice_nb = video->sliceNo;\n    Int nMBPerRow = currVol->nMBPerRow;\n    Int mbnum = video->mbnum;\n\n    Int   p1x, p2x, p3x;\n    Int   p1y, p2y, p3y;\n    Int   xin1, xin2, xin3;\n    Int   yin1, yin2, yin3;\n    Int   vec1, vec2, vec3;\n    
Int   rule1, rule2, rule3;\n    MOT   **motdata = video->mot;\n    Int   x = mbnum % nMBPerRow;\n    Int   y = mbnum / nMBPerRow;\n\n    /*\n        In a previous version, a MB vector (block = 0) was predicted the same way\n        as block 1, which is the most likely interpretation of the VM.\n\n        Therefore, if we have advanced pred. mode, and if all MBs around have\n        only one 16x16 vector each, we chose the appropiate block as if these\n        MBs have 4 vectors.\n\n        This different prediction affects only 16x16 vectors of MBs with\n        transparent blocks.\n\n        In the current version, we choose for the 16x16 mode the first\n        non-transparent block in the surrounding MBs\n    */\n\n    switch (block)\n    {\n        case 0:\n            vec1 = 2 ;\n            yin1 = y  ;\n            xin1 = x - 1;\n            vec2 = 3 ;\n            yin2 = y - 1;\n            xin2 = x;\n            vec3 = 3 ;\n            yin3 = y - 1;\n            xin3 = x + 1;\n            break;\n\n        case 1:\n            vec1 = 2 ;\n            yin1 = y  ;\n            xin1 = x - 1;\n            vec2 = 3 ;\n            yin2 = y - 1;\n            xin2 = x;\n            vec3 = 3 ;\n            yin3 = y - 1;\n            xin3 = x + 1;\n            break;\n\n        case 2:\n            vec1 = 1 ;\n            yin1 = y  ;\n            xin1 = x;\n            vec2 = 4 ;\n            yin2 = y - 1;\n            xin2 = x;\n            vec3 = 3 ;\n            yin3 = y - 1;\n            xin3 = x + 1;\n            break;\n\n        case 3:\n            vec1 = 4 ;\n            yin1 = y  ;\n            xin1 = x - 1;\n            vec2 = 1 ;\n            yin2 = y  ;\n            xin2 = x;\n            vec3 = 2 ;\n            yin3 = y  ;\n            xin3 = x;\n            break;\n\n        default: /* case 4 */\n            vec1 = 3 ;\n            yin1 = y  ;\n            xin1 = x;\n            vec2 = 1 ;\n            yin2 = y  ;\n            xin2 = x;\n            
vec3 = 2 ;\n            yin3 = y  ;\n            xin3 = x;\n            break;\n    }\n\n    if (block == 0)\n    {\n        /* according to the motion encoding, we must choose a first non-transparent\n        block in the surrounding MBs (16-mode)\n            */\n\n        if (x > 0 && slice_nb[mbnum] == slice_nb[mbnum-1])\n            rule1 = 0;\n        else\n            rule1 = 1;\n\n        if (y > 0 && slice_nb[mbnum] == slice_nb[mbnum-nMBPerRow])\n            rule2 = 0;\n        else\n            rule2 = 1;\n\n        if ((x != nMBPerRow - 1) && (y > 0) && slice_nb[mbnum] == slice_nb[mbnum+1-nMBPerRow])\n            rule3 = 0;\n        else\n            rule3 = 1;\n    }\n    else\n    {\n        /* check borders for single blocks (advanced mode) */\n        /* rule 1 */\n        if (((block == 1 || block == 3) &&\n                (x == 0 || slice_nb[mbnum] != slice_nb[mbnum-1])))\n            rule1 = 1;\n        else\n            rule1 = 0;\n\n        /* rule 2 */\n        if (((block == 1 || block == 2) &&\n                (y == 0 || slice_nb[mbnum] != slice_nb[mbnum-nMBPerRow])))\n            rule2 = 1;\n        else\n            rule2 = 0;\n\n        /* rule 3 */\n        if (((block == 1 || block == 2) &&\n                (x == nMBPerRow - 1 || y == 0 || slice_nb[mbnum] != slice_nb[mbnum+1-nMBPerRow])))\n            rule3 = 1;\n        else\n            rule3 = 0;\n    }\n\n    if (rule1)\n    {\n        p1x = p1y = 0;\n    }\n    else\n    {\n\n        p1x = motdata[yin1*nMBPerRow+xin1][vec1].x;\n        p1y = motdata[yin1*nMBPerRow+xin1][vec1].y;\n        //p1x = motxdata[xin1*2+(vec1&0x1) + (yin1*2+(vec1>>1))*xB];\n        //p1y = motydata[xin1*2+(vec1&0x1) + (yin1*2+(vec1>>1))*xB];\n    }\n\n    if (rule2)\n    {\n        p2x = p2y = 0;\n    }\n    else\n    {\n        p2x = motdata[yin2*nMBPerRow+xin2][vec2].x;\n        p2y = motdata[yin2*nMBPerRow+xin2][vec2].y;\n        //p2x = motxdata[xin2*2+(vec2&0x1) + (yin2*2+(vec2>>1))*xB];\n        //p2y 
= motydata[xin2*2+(vec2&0x1) + (yin2*2+(vec2>>1))*xB];\n    }\n\n    if (rule3)\n    {\n        p3x = p3y = 0;\n    }\n    else\n    {\n        p3x = motdata[yin3*nMBPerRow+xin3][vec3].x;\n        p3y = motdata[yin3*nMBPerRow+xin3][vec3].y;\n        //p3x = motxdata[xin3*2+ (vec3&0x1) + (yin3*2+(vec3>>1))*xB];\n        //p3y = motydata[xin3*2+ (vec3&0x1) + (yin3*2+(vec3>>1))*xB];\n    }\n\n    if (rule1 && rule2 && rule3)\n    {\n        /* all MBs are outside the VOP */\n        *mvx = *mvy = 0;\n    }\n    else if (rule1 + rule2 + rule3 == 2)\n    {\n        /* two of three are zero */\n        *mvx = (p1x + p2x + p3x);\n        *mvy = (p1y + p2y + p3y);\n    }\n    else\n    {\n        *mvx = ((p1x + p2x + p3x - PV_MAX(p1x, PV_MAX(p2x, p3x)) - PV_MIN(p1x, PV_MIN(p2x, p3x))));\n        *mvy = ((p1y + p2y + p3y - PV_MAX(p1y, PV_MAX(p2y, p3y)) - PV_MIN(p1y, PV_MIN(p2y, p3y))));\n    }\n\n    return;\n}\n\n\nVoid WriteMVcomponent(Int f_code, Int dmv, BitstreamEncVideo *bs)\n{\n    Int residual, vlc_code_mag, bits, entry;\n\n    ScaleMVD(f_code, dmv, &residual, &vlc_code_mag);\n\n    if (vlc_code_mag < 0)\n        entry = vlc_code_mag + 65;\n    else\n        entry = vlc_code_mag;\n\n    bits = PutMV(entry, bs);\n\n    if ((f_code != 1) && (vlc_code_mag != 0))\n    {\n        BitstreamPutBits(bs, f_code - 1, residual);\n        bits += f_code - 1;\n    }\n    return;\n}\n\n\nVoid\nScaleMVD(\n    Int  f_code,       /* <-- MV range in 1/2 units: 1=32,2=64,...,7=2048     */\n    Int  diff_vector,  /* <-- MV Difference component in 1/2 units            */\n    Int  *residual,    /* --> value to be FLC coded                           */\n    Int  *vlc_code_mag /* --> value to be VLC coded                           */\n)\n{\n    Int   range;\n    Int   scale_factor;\n    Int   r_size;\n    Int   low;\n    Int   high;\n    Int   aux;\n\n    r_size = f_code - 1;\n    scale_factor = 1 << r_size;\n    range = 32 * scale_factor;\n    low   = -range;\n    high  =  range - 1;\n\n 
   if (diff_vector < low)\n        diff_vector += 2 * range;\n    else if (diff_vector > high)\n        diff_vector -= 2 * range;\n\n    if (diff_vector == 0)\n    {\n        *vlc_code_mag = 0;\n        *residual = 0;\n    }\n    else if (scale_factor == 1)\n    {\n        *vlc_code_mag = diff_vector;\n        *residual = 0;\n    }\n    else\n    {\n        aux = PV_ABS(diff_vector) + scale_factor - 1;\n        *vlc_code_mag = aux >> r_size;\n\n        if (diff_vector < 0)\n            *vlc_code_mag = -*vlc_code_mag;\n        *residual = aux & (scale_factor - 1);\n    }\n}\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/vlc_encode.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _VLC_ENCODE_H_\n#define _VLC_ENCODE_H_\n\n#include \"mp4def.h\"\n#include \"mp4enc_api.h\"\n\nInt PutCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutCBPY(Int cbpy, Char intra, BitstreamEncVideo *bitstream);\nInt PutMCBPC_Inter(Int cbpc, Int mode, BitstreamEncVideo *bitstream);\nInt PutMCBPC_Intra(Int cbpc, Int mode, BitstreamEncVideo *bitstream);\nInt PutMV(Int mvint, BitstreamEncVideo *bitstream);\nInt PutDCsize_chrom(Int size, BitstreamEncVideo *bitstream);\nInt PutDCsize_lum(Int size, BitstreamEncVideo *bitstream);\nInt PutCoeff_Inter_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutCoeff_Intra_RVLC(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutRunCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutRunCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutLevelCoeff_Inter(Int run, Int level, Int last, BitstreamEncVideo *bitstream);\nInt PutLevelCoeff_Intra(Int run, Int level, Int last, BitstreamEncVideo 
*bitstream);\n\nVoid MB_CodeCoeff(VideoEncData *video, BitstreamEncVideo *bs);\nVoid BlockCodeCoeff(RunLevelBlock *RLB, BitstreamEncVideo *bs, Int j_start, UChar Mode, Int rvlc, Int shortVideoHeader);\n#endif /* _VLC_ENCODE_H_ */\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/vlc_encode_inline.h",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#ifndef _VLC_ENCODE_INLINE_H_\n#define _VLC_ENCODE_INLINE_H_\n\n#if !defined(PV_ARM_GCC_V5)\n\n__inline  Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)\n{\n    Int idx, run, level, j;\n    UInt end, match;\n\n    idx = 0;\n    j   = 0;\n    run = 0;\n    match = 1 << 31;\n    if (nc > 32)\n        end = 1;\n    else\n        end = 1 << (32 - nc);\n\n    while (match >= end)\n    {\n        if ((match&bitmapzz[0]) == 0)\n        {\n            run++;\n            j++;\n            match >>= 1;\n        }\n        else\n        {\n            match >>= 1;\n            level = dataBlock[j];\n            dataBlock[j] = 0; /* reset output */\n            j++;\n            if (level < 0)\n            {\n                RLB->level[idx] = -level;\n                RLB->s[idx] = 1;\n                RLB->run[idx] = run;\n                run = 0;\n                idx++;\n            }\n            else\n            {\n                RLB->level[idx] = level;\n                RLB->s[idx] = 0;\n                RLB->run[idx] = run;\n                run = 0;\n                idx++;\n            }\n        }\n    }\n    nc -= 32;\n    if (nc > 0)\n    {\n        match = 1 << 
31;\n        end = 1 << (32 - nc);\n        while (match >= end)\n        {\n            if ((match&bitmapzz[1]) == 0)\n            {\n                run++;\n                j++;\n                match >>= 1;\n            }\n            else\n            {\n                match >>= 1;\n                level = dataBlock[j];\n                dataBlock[j] = 0; /* reset output */\n                j++;\n                if (level < 0)\n                {\n                    RLB->level[idx] = -level;\n                    RLB->s[idx] = 1;\n                    RLB->run[idx] = run;\n                    run = 0;\n                    idx++;\n                }\n                else\n                {\n                    RLB->level[idx] = level;\n                    RLB->s[idx] = 0;\n                    RLB->run[idx] = run;\n                    run = 0;\n                    idx++;\n                }\n            }\n        }\n    }\n\n    return idx;\n}\n\n#elif defined(__CC_ARM)  /* only work with arm v5 */\n\n__inline  Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)\n{\n    OSCL_UNUSED_ARG(nc);\n    Int idx, run, level, j;\n    UInt end, match;\n    Int  zzorder;\n\n    idx = 0;\n    run = 0;\n    j   = -1;\n    __asm\n    {\n        ldr match, [bitmapzz]\n        clz run, match\n    }\n\n    zzorder = 0;\n\n    while (run < 32)\n    {\n        __asm\n        {\n            mov end, #0x80000000\n            mov end, end, lsr run   /* mask*/\n            bic match, match, end       /* remove it from bitmap */\n            mov run, run, lsl #1  /* 05/09/02 */\n            ldrsh level, [dataBlock, run] /*  load data */\n            strh zzorder, [dataBlock, run] /* reset output */\n            add j, j, #1\n            rsb run, j, run, lsr #1 /* delta run */\n            add j, j, run           /* current position */\n        }\n        if (level < 0)\n        {\n            RLB->level[idx] = -level;\n            RLB->s[idx] = 1;\n            
RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        else\n        {\n            RLB->level[idx] = level;\n            RLB->s[idx] = 0;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        __asm\n        {\n            clz run, match\n        }\n    }\n    __asm\n    {\n        ldr match, [bitmapzz, #4]\n        clz run, match\n    }\n\n    while (run < 32)\n    {\n        __asm\n        {\n            mov end, #0x80000000\n            mov end, end, lsr run   /* mask*/\n            bic match, match, end       /* remove it from bitmap */\n            add run, run, #32       /* current position */\n            mov run, run, lsl #1    /* 09/02/05 */\n            ldrsh level, [dataBlock, run] /*  load data */\n            strh  zzorder, [dataBlock, run] /* reset output */\n            add j, j, #1\n            rsb run, j, run, lsr #1     /* delta run */\n            add j, j, run           /* current position */\n        }\n        if (level < 0)\n        {\n            RLB->level[idx] = -level;\n            RLB->s[idx] = 1;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        else\n        {\n            RLB->level[idx] = level;\n            RLB->s[idx] = 0;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        __asm\n        {\n            clz run, match\n        }\n    }\n\n    return idx;\n}\n\n#elif (defined(PV_ARM_GCC_V5) ) /* ARM GNU COMPILER  */\n\n__inline Int m4v_enc_clz(UInt temp)\n{\n    register Int rb;\n    register UInt ra = (UInt)temp;\n\n    asm volatile(\"clz   %0, %1\"\n             : \"=&r\"(rb)\n                         : \"r\"(ra)\n                        );\n\n    return (rb);\n}\n\n__inline  Int zero_run_search(UInt *bitmapzz, Short *dataBlock, RunLevelBlock *RLB, Int nc)\n{\n    OSCL_UNUSED_ARG(nc);\n    Int idx, run, level = 0, j;\n    UInt end = 0, match;\n    Int  zzorder;\n\n    idx = 
0;\n    run = 0;\n    j   = -1;\n    match = *bitmapzz;\n    run = m4v_enc_clz(match);\n\n    zzorder = 0;\n\n    while (run < 32)\n    {\n        asm volatile(\"mov   %0, #0x80000000\\n\\t\"\n                     \"mov   %0, %0, lsr %1\\n\\t\"\n                     \"bic   %2, %2, %0\\n\\t\"\n                     \"mov   %1, %1, lsl #1\\n\\t\"\n                     \"ldrsh %3, [%6, %1]\\n\\t\"\n                     \"strh  %5, [%6, %1]\\n\\t\"\n                     \"add   %4, %4, #1\\n\\t\"\n                     \"rsb   %1, %4, %1, lsr #1\\n\\t\"\n                     \"add   %4, %4, %1\"\n             : \"+r\"(end), \"+r\"(run), \"+r\"(match), \"=r\"(level), \"+r\"(j)\n                             : \"r\"(zzorder), \"r\"(dataBlock));\n        if (level < 0)\n        {\n            RLB->level[idx] = -level;\n            RLB->s[idx] = 1;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        else\n        {\n            RLB->level[idx] = level;\n            RLB->s[idx] = 0;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        run = m4v_enc_clz(match);\n    }\n    match = bitmapzz[1];\n    run = m4v_enc_clz(match);\n\n    while (run < 32)\n    {\n        asm volatile(\"mov   %0, #0x80000000\\n\\t\"\n                     \"mov   %0, %0, lsr %1\\n\\t\"\n                     \"bic   %2, %2, %0\\n\\t\"\n                     \"add   %1, %1, #32\\n\\t\"\n                     \"mov   %1, %1, lsl #1\\n\\t\"\n                     \"ldrsh %3, [%6, %1]\\n\\t\"\n                     \"strh  %5, [%6, %1]\\n\\t\"\n                     \"add   %4, %4, #1\\n\\t\"\n                     \"rsb   %1, %4, %1, lsr #1\\n\\t\"\n                     \"add   %4, %4, %1\"\n             : \"+r\"(end), \"+r\"(run), \"+r\"(match), \"+r\"(level), \"+r\"(j)\n                             : \"r\"(zzorder), \"r\"(dataBlock));\n        if (level < 0)\n        {\n            RLB->level[idx] = -level;\n            
RLB->s[idx] = 1;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        else\n        {\n            RLB->level[idx] = level;\n            RLB->s[idx] = 0;\n            RLB->run[idx] = run;\n            run = 0;\n            idx++;\n        }\n        run = m4v_enc_clz(match);\n    }\n\n    return idx;\n}\n\n#endif\n\n#endif // _VLC_ENCODE_INLINE_H_\n\n\n"
  },
  {
    "path": "RtspCamera/jni/m4v_h263/enc/src/vop.cpp",
    "content": "/* ------------------------------------------------------------------\n * Copyright (C) 1998-2009 PacketVideo\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n * express or implied.\n * See the License for the specific language governing permissions\n * and limitations under the License.\n * -------------------------------------------------------------------\n */\n#include \"mp4def.h\"\n#include \"mp4lib_int.h\"\n#include \"mp4enc_lib.h\"\n#include \"bitstream_io.h\"\n#include \"m4venc_oscl.h\"\n\nPV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop);\nPV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop);\nPV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds);\n\nPV_STATUS EncodeVop_BXRC(VideoEncData *video);\nPV_STATUS EncodeVop_NoME(VideoEncData *video);\n\n/* ======================================================================== */\n/*  Function : DecodeVop()                                                  */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  : Encode VOP Header                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS EncodeVop(VideoEncData *video)\n{\n\n    PV_STATUS status;\n    Int currLayer = video->currLayer;\n    Vol 
*currVol = video->vol[currLayer];\n    Vop *currVop = video->currVop;\n//  BitstreamEncVideo *stream=video->bitstream1;\n    UChar *Mode = video->headerInfo.Mode;\n    rateControl **rc = video->rc;\n//  UInt time=0;\n\n    /*******************/\n    /* Initialize mode */\n    /*******************/\n\n    switch (currVop->predictionType)\n    {\n        case I_VOP:\n            M4VENC_MEMSET(Mode, MODE_INTRA, sizeof(UChar)*currVol->nTotalMB);\n            break;\n        case P_VOP:\n            M4VENC_MEMSET(Mode, MODE_INTER, sizeof(UChar)*currVol->nTotalMB);\n            break;\n        case B_VOP:\n            /*M4VENC_MEMSET(Mode, MODE_INTER_B,sizeof(UChar)*nTotalMB);*/\n            return PV_FAIL;\n        default:\n            return PV_FAIL;\n    }\n\n    /*********************/\n    /* Motion Estimation */\n    /* compute MVs, scene change detection, edge padding, */\n    /* intra refresh, compute block activity */\n    /*********************/\n    MotionEstimation(video);    /* do ME for the whole frame */\n\n    /***************************/\n    /* rate Control (assign QP) */\n    /* 4/11/01, clean-up, and put into a separate function */\n    /***************************/\n    status = RC_VopQPSetting(video, rc);\n    if (status == PV_FAIL)\n        return PV_FAIL;\n\n    /**********************/\n    /*     Encode VOP     */\n    /**********************/\n    if (video->slice_coding) /* end here */\n    {\n        /* initialize state variable for slice-based APIs */\n        video->totalSAD = 0;\n        video->mbnum = 0;\n        video->sliceNo[0] = 0;\n        video->numIntra = 0;\n        video->offset = 0;\n        video->end_of_buf = 0;\n        video->hp_guess = -1;\n        return status;\n    }\n\n    status = EncodeVop_NoME(video);\n\n    /******************************/\n    /* rate control (update stat) */\n    /* 6/2/01 separate function */\n    /******************************/\n\n    RC_VopUpdateStat(video, rc[currLayer]);\n\n    return 
status;\n}\n\n/* ======================================================================== */\n/*  Function : EncodeVop_NoME()                                             */\n/*  Date     : 08/28/2001                                                   */\n/*  History  :                                                              */\n/*  Purpose  : EncodeVop without motion est.                                */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nPV_STATUS EncodeVop_NoME(VideoEncData *video)\n{\n    Vop *currVop = video->currVop;\n    Vol *currVol = video->vol[video->currLayer];\n    BitstreamEncVideo *stream = video->bitstream1;\n    Int time = 0;   /* follows EncodeVop value */\n    PV_STATUS status = PV_SUCCESS;\n\n    if (currVol->shortVideoHeader) /* Short Video Header = 1 */\n    {\n\n        status = EncodeShortHeader(stream, currVop); /* Encode Short Header */\n\n        video->header_bits = BitstreamGetPos(stream); /* Header Bits */\n\n        status = EncodeFrameCombinedMode(video);\n\n    }\n#ifndef H263_ONLY\n    else    /* Short Video Header = 0 */\n    {\n\n        if (currVol->GOVStart && currVop->predictionType == I_VOP)\n            status = EncodeGOVHeader(stream, time); /* Encode GOV Header */\n\n        status = EncodeVOPHeader(stream, currVol, currVop);  /* Encode VOP Header */\n\n        video->header_bits = BitstreamGetPos(stream); /* Header Bits */\n\n        if (currVop->vopCoded)\n        {\n            if (!currVol->scalability)\n            {\n                if (currVol->dataPartitioning)\n                {\n                    status = EncodeFrameDataPartMode(video); /* Encode 
Data Partitioning Mode VOP */\n                }\n                else\n                {\n                    status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */\n                }\n            }\n            else\n                status = EncodeFrameCombinedMode(video); /* Encode Combined Mode VOP */\n        }\n        else  /* Vop Not coded */\n        {\n\n            return status;\n        }\n    }\n#endif /* H263_ONLY */\n    return status;\n\n}\n\n#ifndef NO_SLICE_ENCODE\n/* ======================================================================== */\n/*  Function : EncodeSlice()                                                */\n/*  Date     : 04/19/2002                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode one slice.                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/*                                                                          */\n/* ======================================================================== */\n\nPV_STATUS EncodeSlice(VideoEncData *video)\n{\n    Vop *currVop = video->currVop;\n    Int currLayer = video->currLayer;\n    Vol *currVol = video->vol[currLayer];\n    BitstreamEncVideo *stream = video->bitstream1; /* different from frame-based */\n    Int time = 0;   /* follows EncodeVop value */\n    PV_STATUS status = PV_SUCCESS;\n    rateControl **rc = video->rc;\n\n    if (currVol->shortVideoHeader) /* Short Video Header = 1 */\n    {\n\n        if (video->mbnum == 0)\n        {\n            status = EncodeShortHeader(stream, currVop); /* Encode Short Header */\n\n            video->header_bits = BitstreamGetPos(stream); /* Header Bits */\n        }\n\n        status = 
EncodeSliceCombinedMode(video);\n\n    }\n#ifndef H263_ONLY\n    else    /* Short Video Header = 0 */\n    {\n\n        if (video->mbnum == 0)\n        {\n            if (currVol->GOVStart)\n                status = EncodeGOVHeader(stream, time); /* Encode GOV Header */\n\n            status = EncodeVOPHeader(stream, currVol, currVop);  /* Encode VOP Header */\n\n            video->header_bits = BitstreamGetPos(stream); /* Header Bits */\n        }\n\n        if (currVop->vopCoded)\n        {\n            if (!currVol->scalability)\n            {\n                if (currVol->dataPartitioning)\n                {\n                    status = EncodeSliceDataPartMode(video); /* Encode Data Partitioning Mode VOP */\n                }\n                else\n                {\n                    status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */\n                }\n            }\n            else\n                status = EncodeSliceCombinedMode(video); /* Encode Combined Mode VOP */\n        }\n        else  /* Vop Not coded */\n        {\n\n            return status;\n        }\n    }\n#endif /* H263_ONLY */\n    if (video->mbnum >= currVol->nTotalMB && status != PV_END_OF_BUF) /* end of Vop */\n    {\n        /******************************/\n        /* rate control (update stat) */\n        /* 6/2/01 separate function */\n        /******************************/\n\n        status = RC_VopUpdateStat(video, rc[currLayer]);\n    }\n\n    return status;\n\n}\n#endif /* NO_SLICE_ENCODE */\n\n#ifndef H263_ONLY\n/* ======================================================================== */\n/*  Function : EncodeGOVHeader()                                            */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  : Encode GOV Header                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                  
                            */\n/*  Modified :                                                              */\n/* ======================================================================== */\nPV_STATUS EncodeGOVHeader(BitstreamEncVideo *stream, UInt seconds)\n{\n    PV_STATUS status;\n//  int temp;\n    UInt tmpvar;\n\n    /********************************/\n    /* Group_of_VideoObjectPlane()  */\n    /********************************/\n\n    status = BitstreamPutGT16Bits(stream, 32, GROUP_START_CODE);\n    /* time_code */\n    tmpvar = seconds / 3600;\n    status = BitstreamPutBits(stream, 5, tmpvar); /* Hours*/\n\n    tmpvar = (seconds - tmpvar * 3600) / 60;\n    status = BitstreamPutBits(stream, 6, tmpvar); /* Minutes*/\n\n    status = BitstreamPut1Bits(stream, 1); /* Marker*/\n\n    tmpvar = seconds % 60;\n    status = BitstreamPutBits(stream, 6, tmpvar); /* Seconds*/\n\n    status = BitstreamPut1Bits(stream, 1); /* closed_gov */\n    status = BitstreamPut1Bits(stream, 0); /* broken_link */\n    /*temp =*/\n    BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align GOV Header */\n\n    return status;\n}\n\n#ifdef ALLOW_VOP_NOT_CODED\n\nPV_STATUS EncodeVopNotCoded(VideoEncData *video, UChar *bstream, Int *size, ULong modTime)\n{\n    PV_STATUS status;\n    Vol *currVol = video->vol[0];\n    Vop *currVop = video->currVop;\n    BitstreamEncVideo *stream = currVol->stream;\n    UInt frameTick;\n    Int timeInc;\n\n    stream->bitstreamBuffer = bstream;\n    stream->bufferSize = *size;\n    BitstreamEncReset(stream);\n\n    status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/\n    status = BitstreamPutBits(stream, 2, P_VOP);/* VOP Coding Type*/\n\n    frameTick = (Int)(((double)(modTime - video->modTimeRef) * currVol->timeIncrementResolution + 500) / 1000);\n    timeInc = frameTick - video->refTick[0];\n    while (timeInc >= currVol->timeIncrementResolution)\n    {\n        timeInc -= currVol->timeIncrementResolution;\n        status = 
BitstreamPut1Bits(stream, 1);\n        /* do not update refTick and modTimeRef yet, do it after encoding!! */\n    }\n    status = BitstreamPut1Bits(stream, 0);\n    status = BitstreamPut1Bits(stream, 1); /* marker bit */\n    status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, timeInc); /* vop_time_increment */\n    status = BitstreamPut1Bits(stream, 1); /* marker bit */\n    status = BitstreamPut1Bits(stream, 0); /* vop_coded bit */\n    BitstreamMpeg4ByteAlignStuffing(stream);\n\n    return status;\n}\n#endif\n\n/* ======================================================================== */\n/*  Function : EncodeVOPHeader()                                            */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  : Encode VOP Header                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS EncodeVOPHeader(BitstreamEncVideo *stream, Vol *currVol, Vop *currVop)\n{\n    PV_STATUS status;\n    //int temp;\n\n    int MTB = currVol->moduloTimeBase;\n    /************************/\n    /* VideoObjectPlane()   */\n    /************************/\n\n    status = BitstreamPutGT16Bits(stream, 32, VOP_START_CODE); /*Start Code for VOP*/\n    status = BitstreamPutBits(stream, 2, currVop->predictionType);/* VOP Coding Type*/\n\n    currVol->prevModuloTimeBase = currVol->moduloTimeBase;\n\n    while (MTB)\n    {\n        status = BitstreamPut1Bits(stream, 1);\n        MTB--;\n    }\n    status = BitstreamPut1Bits(stream, 0);\n\n    status = BitstreamPut1Bits(stream, 1); /* marker bit */\n    status = BitstreamPutBits(stream, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */\n    status = 
BitstreamPut1Bits(stream, 1); /* marker bit */\n    status = BitstreamPut1Bits(stream, currVop->vopCoded); /* vop_coded bit */\n    if (currVop->vopCoded == 0)\n    {\n        /*temp =*/\n        BitstreamMpeg4ByteAlignStuffing(stream); /* Byte align VOP Header */\n        return status;\n    }\n    if (currVop->predictionType == P_VOP)\n        status = BitstreamPut1Bits(stream, currVop->roundingType); /* vop_rounding_type */\n\n    status = BitstreamPutBits(stream, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */\n    status = BitstreamPutBits(stream, 5, currVop->quantizer);   /* vop_quant */\n\n    if (currVop->predictionType != I_VOP)\n        status = BitstreamPutBits(stream, 3, currVop->fcodeForward); /* vop_fcode_forward */\n    if (currVop->predictionType == B_VOP)\n        status = BitstreamPutBits(stream, 3, currVop->fcodeBackward);/* vop_fcode_backward */\n\n    if (currVol->scalability)\n        /* enhancement_type = 0 */\n        status = BitstreamPutBits(stream, 2, currVop->refSelectCode); /* ref_select_code */\n\n    return status;\n}\n#endif /* H263_ONLY */\n/* ======================================================================== */\n/*  Function : EncodeShortHeader()                                          */\n/*  Date     : 08/23/2000                                                   */\n/*  Purpose  : Encode VOP Header                                            */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified :                                                              */\n/* ======================================================================== */\n\nPV_STATUS EncodeShortHeader(BitstreamEncVideo *stream, Vop *currVop)\n{\n\n    PV_STATUS status;\n\n    status = BitstreamPutGT16Bits(stream, 22, SHORT_VIDEO_START_MARKER); /* Short_video_start_marker */\n    status = BitstreamPutBits(stream, 8, 
currVop->temporalRef); /* temporal_reference */\n    status = BitstreamPut1Bits(stream, 1); /* marker bit */\n    status = BitstreamPut1Bits(stream, 0); /* zero bit */\n    status = BitstreamPut1Bits(stream, 0); /* split_screen_indicator=0*/\n    status = BitstreamPut1Bits(stream, 0); /* document_camera_indicator=0*/\n    status = BitstreamPut1Bits(stream, 0); /* full_picture_freeze_release=0*/\n\n    switch (currVop->width)\n    {\n        case 128:\n            if (currVop->height == 96)\n                status = BitstreamPutBits(stream, 3, 1); /* source_format = 1 */\n            else\n            {\n                status = PV_FAIL;\n                return status;\n            }\n            break;\n\n        case 176:\n            if (currVop->height == 144)\n                status = BitstreamPutBits(stream, 3, 2); /* source_format = 2 */\n            else\n            {\n                status = PV_FAIL;\n                return status;\n            }\n            break;\n\n        case 352:\n            if (currVop->height == 288)\n                status = BitstreamPutBits(stream, 3, 3); /* source_format = 3 */\n            else\n            {\n                status = PV_FAIL;\n                return status;\n            }\n            break;\n\n        case 704:\n            if (currVop->height == 576)\n                status = BitstreamPutBits(stream, 3, 4); /* source_format = 4 */\n            else\n            {\n                status = PV_FAIL;\n                return status;\n            }\n            break;\n\n        case 1408:\n            if (currVop->height == 1152)\n                status = BitstreamPutBits(stream, 3, 5); /* source_format = 5 */\n            else\n            {\n                status = PV_FAIL;\n                return status;\n            }\n            break;\n\n        default:\n            status = PV_FAIL;\n            return status;\n    }\n\n\n    status = BitstreamPut1Bits(stream, currVop->predictionType); /* 
picture_coding type */\n    status = BitstreamPutBits(stream, 4, 0); /* four_reserved_zero_bits */\n    status = BitstreamPutBits(stream, 5, currVop->quantizer); /* vop_quant*/\n    status = BitstreamPut1Bits(stream, 0); /* zero_bit*/\n    status = BitstreamPut1Bits(stream, 0); /* pei=0 */\n\n    return status;\n}\n\n#ifndef H263_ONLY\n/* ======================================================================== */\n/*  Function : EncodeVideoPacketHeader()                                    */\n/*  Date     : 09/05/2000                                                   */\n/*  History  :                                                              */\n/*  Purpose  : Encode a frame of MPEG4 bitstream in Combined mode.          */\n/*  In/out   :                                                              */\n/*  Return   :                                                              */\n/*  Modified : 04/25/2002                               */\n/*             Add bitstream structure as input argument                    */\n/*                                                                          */\n/* ======================================================================== */\nPV_STATUS EncodeVideoPacketHeader(VideoEncData *video, int MB_number,\n                                  int quant_scale, Int insert)\n{\n//  PV_STATUS status=PV_SUCCESS;\n    int fcode;\n    Vop *currVop = video->currVop;\n    Vol *currVol = video->vol[video->currLayer];\n    BitstreamEncVideo *bs, tmp;\n    UChar buffer[30];\n\n    if (insert) /* insert packet header to the beginning of bs1 */\n    {\n        tmp.bitstreamBuffer = buffer; /* use temporary buffer */\n        tmp.bufferSize = 30;\n        BitstreamEncReset(&tmp);\n        bs = &tmp;\n    }\n    else\n        bs = video->bitstream1;\n\n\n    if (currVop->predictionType == I_VOP)\n        BitstreamPutGT16Bits(bs, 17, 1);    /* resync_marker I_VOP */\n    else if (currVop->predictionType == P_VOP)\n    {\n        fcode = 
currVop->fcodeForward;\n        BitstreamPutGT16Bits(bs, 16 + fcode, 1);    /* resync_marker P_VOP */\n\n    }\n    else\n    {\n        fcode = currVop->fcodeForward;\n        if (currVop->fcodeBackward > fcode)\n            fcode = currVop->fcodeBackward;\n        BitstreamPutGT16Bits(bs, 16 + fcode, 1);    /* resync_marker B_VOP */\n    }\n\n    BitstreamPutBits(bs, currVol->nBitsForMBID, MB_number); /* resync_marker */\n    BitstreamPutBits(bs, 5, quant_scale); /* quant_scale */\n    BitstreamPut1Bits(bs, 0); /* header_extension_code = 0 */\n\n    if (0) /* header_extension_code = 1 */\n    {\n        /* NEED modulo_time_base code here ... default 0x01  belo*/\n        /*status =*/\n        BitstreamPut1Bits(bs, 1);\n        /*status = */\n        BitstreamPut1Bits(bs, 0);\n\n        /*status = */\n        BitstreamPut1Bits(bs, 1); /* marker bit */\n        /*status = */\n        BitstreamPutBits(bs, currVol->nbitsTimeIncRes, currVop->timeInc); /* vop_time_increment */\n        /*status = */\n        BitstreamPut1Bits(bs, 1); /* marker bit */\n\n        /*status = */\n        BitstreamPutBits(bs, 2, currVop->predictionType);/* VOP Coding Type*/\n\n        /*status = */\n        BitstreamPutBits(bs, 3, currVop->intraDCVlcThr); /* intra_dc_vlc_thr */\n\n        if (currVop->predictionType != I_VOP)\n            /*status = */ BitstreamPutBits(bs, 3, currVop->fcodeForward);\n        if (currVop->predictionType == B_VOP)\n            /*status = */ BitstreamPutBits(bs, 3, currVop->fcodeBackward);\n    }\n#ifndef NO_SLICE_ENCODE\n    if (insert)\n        BitstreamPrependPacket(video->bitstream1, bs);\n#endif\n    return PV_SUCCESS;\n}\n\n#endif /* H263_ONLY */\n\n\n\n"
  },
  {
    "path": "RtspCamera/proguard.cfg",
    "content": "-optimizationpasses 5\n-dontusemixedcaseclassnames\n-dontskipnonpubliclibraryclasses\n-dontpreverify\n-verbose\n-optimizations !code/simplification/arithmetic,!field/*,!class/merging/*\n\n-keep public class * extends android.app.Activity\n-keep public class * extends android.app.Application\n-keep public class * extends android.app.Service\n-keep public class * extends android.content.BroadcastReceiver\n-keep public class * extends android.content.ContentProvider\n-keep public class * extends android.app.backup.BackupAgentHelper\n-keep public class * extends android.preference.Preference\n-keep public class com.android.vending.licensing.ILicensingService\n\n-keepclasseswithmembernames class * {\n    native <methods>;\n}\n\n-keepclasseswithmembers class * {\n    public <init>(android.content.Context, android.util.AttributeSet);\n}\n\n-keepclasseswithmembers class * {\n    public <init>(android.content.Context, android.util.AttributeSet, int);\n}\n\n-keepclassmembers class * extends android.app.Activity {\n   public void *(android.view.View);\n}\n\n-keepclassmembers enum * {\n    public static **[] values();\n    public static ** valueOf(java.lang.String);\n}\n\n-keep class * implements android.os.Parcelable {\n  public static final android.os.Parcelable$Creator *;\n}\n"
  },
  {
    "path": "RtspCamera/project.properties",
    "content": "# This file is automatically generated by Android Tools.\n# Do not modify this file -- YOUR CHANGES WILL BE ERASED!\n#\n# This file must be checked in Version Control Systems.\n#\n# To customize properties used by the Ant build system use,\n# \"ant.properties\", and override values to adapt the script to your\n# project structure.\n\n# Project target.\ntarget=android-10\nandroid.library=true\n"
  },
  {
    "path": "RtspCamera/res/layout/cameraapicodecs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<SurfaceView xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    android:id=\"@+id/smallcameraview\"\n    android:layout_width=\"match_parent\"\n    android:layout_height=\"match_parent\"\n    >\n</SurfaceView>"
  },
  {
    "path": "RtspCamera/res/layout/cameranativecodecs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<SurfaceView xmlns:android=\"http://schemas.android.com/apk/res/android\"\n    android:id=\"@+id/smallcameraview\"\n    android:layout_width=\"match_parent\"\n    android:layout_height=\"match_parent\"\n    >\n</SurfaceView>"
  },
  {
    "path": "RtspCamera/res/values/strings.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<resources>\n    <string name=\"hello\">Spydroid running...</string>\n    <string name=\"app_name\">RtspCamera</string>\n</resources>\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/CoreException.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core;\r\n\r\n/**\r\n * Core module exception\r\n * \r\n * @author JM. Auffret\r\n */\r\npublic class CoreException extends java.lang.Exception {\r\n\tstatic final long serialVersionUID = 1L;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t *\r\n\t * @param error Error message\r\n\t */\r\n\tpublic CoreException(String error) {\r\n\t\tsuper(error);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/CodecChain.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\r\nimport com.orangelabs.rcs.utils.logger.Logger;\r\n\r\n/**\r\n * Codec chain\r\n * \r\n * @author jexa7410\r\n */\r\npublic class CodecChain {\r\n\t/**\r\n\t * List of codecs\r\n\t */\r\n\tprivate Codec[] codecs = null;\r\n\t\r\n\t/**\r\n\t * List of buffers\r\n\t */\r\n\tprivate Buffer[] buffers = null;\r\n\t\r\n\t/**\r\n\t * Renderer\r\n\t */\r\n\tprivate ProcessorOutputStream renderer;\r\n\t\r\n\t/**\r\n     * The logger\r\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n    /**\r\n\t * Constructor\r\n\t * \r\n\t * @param codecs Codecs list\r\n\t */\r\n\tpublic CodecChain(Codec[] codecs, ProcessorOutputStream renderer) {\r\n\t\tthis.codecs = codecs;\r\n\t\tthis.renderer = renderer;\r\n\r\n\t\t// Create the buffer chain\r\n\t\tbuffers = new Buffer[codecs.length+1];\r\n\t\tfor (int i = 0; i < codecs.length; i++) {\r\n\t\t\tbuffers[i] = new 
Buffer();\r\n\t\t}\r\n\t\t\r\n\t\t// Prepare codecs\r\n    \tfor(int i=0; i < codecs.length; i++) {\r\n    \t\tif (logger.isActivated()) {\r\n    \t\t\tlogger.debug(\"Open codec \" + codecs[i].getClass().getName());\r\n    \t\t}\r\n    \t\tcodecs[i].open();\r\n    \t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Codec chain processing\r\n\t * \r\n\t * @param input Input buffer\r\n\t * @return Result\r\n\t */\r\n\tpublic int process(Buffer input) {\r\n\t\tint codecNo = 0;\r\n\t\treturn doProcess(codecNo, input);\r\n\t}\r\n\r\n\t/**\r\n\t * Recursive codec processing\r\n\t * \r\n\t * @param codecNo Codec index\r\n\t * @param input Input buffer\r\n\t * @return Result\r\n\t */\r\n\tprivate int doProcess(int codecNo, Buffer input) {\r\n\t\tif (codecNo == codecs.length) {\r\n\t\t\t// End of chain\r\n\t\t\ttry {\r\n\t\t\t\t// Write data to the output stream\r\n\t\t\t\trenderer.write(input);\r\n\t\t\t\treturn Codec.BUFFER_PROCESSED_OK;\r\n\t\t\t} catch (Exception e) {\r\n\t\t\t\treturn Codec.BUFFER_PROCESSED_FAILED;\r\n\t\t\t}\r\n\t\t} else {\r\n\t\t\t// Process this codec\r\n\t\t\tCodec codec = codecs[codecNo];\r\n\t\t\tint returnVal;\r\n\t\t\tdo {\r\n\t\t\t\ttry {\r\n\t\t\t\t\treturnVal = codec.process(input, buffers[codecNo]);\r\n\t\t\t\t} catch (Exception e) {\r\n\t\t\t\t\treturn Codec.BUFFER_PROCESSED_FAILED;\r\n\t\t\t\t}\r\n\r\n\t\t\t\tif (returnVal == Codec.BUFFER_PROCESSED_FAILED)\r\n\t\t\t\t\treturn Codec.BUFFER_PROCESSED_FAILED;\r\n\t\t\t\t\r\n\t\t\t\tif ((returnVal & Codec.OUTPUT_BUFFER_NOT_FILLED) == 0) {\r\n\t\t\t\t\tif (!(buffers[codecNo].isDiscard() || buffers[codecNo].isEOM())) {\r\n\t\t\t\t\t\tdoProcess(codecNo + 1, buffers[codecNo]);\r\n\t\t\t\t\t}\r\n\t\t\t\t\tbuffers[codecNo].setOffset(0);\r\n\t\t\t\t\tbuffers[codecNo].setLength(0);\r\n\t\t\t\t\tbuffers[codecNo].setFlags(0);\r\n\t\t\t\t}\r\n\t\t\t} while((returnVal & Codec.INPUT_BUFFER_NOT_CONSUMED) != 0);\r\n\r\n\t\t\treturn returnVal;\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/MediaRegistry.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp;\r\n\r\nimport java.util.Enumeration;\nimport java.util.Hashtable;\nimport java.util.Vector;\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.audio.AudioFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.audio.PcmuAudioFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.VideoFormat;\n\r\n/**\r\n * Media registry that handles the supported codecs\r\n * \r\n * @author jexa7410\r\n */\r\npublic class MediaRegistry {\r\n\r\n\t/**\r\n\t * Supported codecs\r\n\t */\r\n\tprivate static Hashtable<String, Format> SUPPORTED_CODECS = new Hashtable<String, Format>();\r\n\tstatic {\r\n\t\tSUPPORTED_CODECS.put(H263VideoFormat.ENCODING.toLowerCase(), new H263VideoFormat());\t\t\r\n\t\tSUPPORTED_CODECS.put(H264VideoFormat.ENCODING.toLowerCase(), new 
H264VideoFormat());\t\t\n\t\tSUPPORTED_CODECS.put(PcmuAudioFormat.ENCODING.toLowerCase(), new PcmuAudioFormat());\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the list of the supported video format\r\n\t * \r\n\t * @return List of video formats\r\n\t */\r\n\tpublic static Vector<VideoFormat> getSupportedVideoFormats() {\r\n\t\tVector<VideoFormat> list = new Vector<VideoFormat>();\r\n    \tfor (Enumeration<Format> e = SUPPORTED_CODECS.elements() ; e.hasMoreElements() ;) {\r\n\t         Format fmt = (Format)e.nextElement();\r\n\t         if (fmt instanceof VideoFormat) {\r\n\t\t         list.addElement((VideoFormat)fmt);\r\n\t         }\r\n\t     }\r\n\t\treturn list;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the list of the supported audio format\r\n\t * \r\n\t * @return List of audio formats\r\n\t */\r\n\tpublic static Vector<AudioFormat> getSupportedAudioFormats() {\r\n\t\tVector<AudioFormat> list = new Vector<AudioFormat>();\r\n    \tfor (Enumeration<Format> e = SUPPORTED_CODECS.elements() ; e.hasMoreElements() ;) {\r\n\t         Format fmt = (Format)e.nextElement();\r\n\t         if (fmt instanceof AudioFormat) {\r\n\t\t         list.addElement((AudioFormat)fmt);\r\n\t         }\r\n\t     }\r\n\t\treturn list;\r\n\t}\r\n\r\n\t/**\r\n     * Generate the format associated to the codec name\r\n     * \r\n     * @param codec Codec name\r\n     * @return Format\r\n     */\r\n    public static Format generateFormat(String codec) {\r\n    \treturn (Format)SUPPORTED_CODECS.get(codec.toLowerCase());\r\n    }    \r\n    \r\n\t/**\r\n     * Is codec supported\r\n     * \r\n     * @param codec Codec name\r\n     * @return Boolean\r\n     */\r\n    public static boolean isCodecSupported(String codec) {\r\n    \tFormat format = (Format)SUPPORTED_CODECS.get(codec.toLowerCase());\r\n\t\treturn (format != null);\r\n    }    \r\n    \r\n\t/**\r\n     * Generate the codec encoding chain\r\n     * \r\n\t * @param encoding Encoding name\r\n     * @return Codec chain\r\n     */\r\n    public 
static Codec[] generateEncodingCodecChain(String encoding) {\r\n    \tif (encoding.toLowerCase().equalsIgnoreCase(H263VideoFormat.ENCODING)) {\r\n    \t\t// Java H263 packetizer\r\n    \t\tCodec[] chain = {\r\n    \t\t\tnew com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.JavaPacketizer()\r\n    \t\t};\r\n    \t\treturn chain;\r\n\t\t} else { \r\n\t\t\t// Codec implemented in the native part\r\n\t\t\treturn new Codec[0];\r\n\t\t}\r\n    }\r\n\r\n\t/**\r\n\t * Generate the decoding codec chain\r\n\t * \r\n\t * @param encoding Encoding name\r\n\t * @return Codec chain\r\n\t */\r\n\tpublic static Codec[] generateDecodingCodecChain(String encoding) {\r\n    \tif (encoding.toLowerCase().equalsIgnoreCase(H263VideoFormat.ENCODING)) {\r\n    \t\t// Java H263 depacketizer\r\n    \t\tCodec[] chain = {\r\n    \t\t\tnew com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.JavaDepacketizer()\r\n    \t\t};\r\n    \t\treturn chain;\r\n\t\t} else { \r\n\t\t\t// Codec implemented in the native part\r\n\t\t\treturn new Codec[0];\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/MediaRtpReceiver.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp;\r\n\r\n\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.MediaRendererStream;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.RtpInputStream;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\r\n/**\r\n * Media RTP receiver\r\n */\r\npublic class MediaRtpReceiver {\r\n    /**\r\n     * Media processor\r\n     */\r\n    private Processor processor = null;\r\n\r\n\t/**\r\n\t * Local port number (RTP listening port)\r\n\t */\r\n\tprivate int localPort;\r\n\n    /**\n     * RTP Input Stream\n     */\n    private RtpInputStream inputStream = null;\n\r\n\t/**\r\n\t * The logger\r\n\t */\r\n\tprivate Logger logger =\tLogger.getLogger(this.getClass().getName());\n\n    /**\n     * Constructor\n     *\n     * @param localPort Local port number\n     */\r\n\tpublic MediaRtpReceiver(int localPort) {\r\n\t\tthis.localPort = localPort;\r\n\t}\r\n\r\n    /**\n     * Prepare the RTP session\n     *\n   
  * @param renderer Media renderer\n     * @param format Media format\n     * @throws RtpException\n     */\r\n    public void prepareSession(MediaOutput renderer, Format format)\n            throws RtpException {\r\n    \ttry {\r\n\t\t\t// Create the input stream\r\n            inputStream = new RtpInputStream(localPort, format);\r\n    \t\tinputStream.open();\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Input stream: \" + inputStream.getClass().getName());\r\n\t\t\t}\r\n\r\n            // Create the output stream\r\n        \tMediaRendererStream outputStream = new MediaRendererStream(renderer);\r\n    \t\toutputStream.open();\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Output stream: \" + outputStream.getClass().getName());\r\n\t\t\t}\r\n\r\n        \t// Create the codec chain\r\n        \tCodec[] codecChain = MediaRegistry.generateDecodingCodecChain(format.getCodec());\r\n\r\n            // Create the media processor\r\n    \t\tprocessor = new Processor(inputStream, outputStream, codecChain);\r\n\r\n        \tif (logger.isActivated()) {\r\n        \t\tlogger.debug(\"Session has been prepared with success\");\r\n            }\r\n        } catch(Exception e) {\r\n        \tif (logger.isActivated()) {\r\n        \t\tlogger.error(\"Can't prepare resources correctly\", e);\r\n        \t}\r\n        \tthrow new RtpException(\"Can't prepare resources\");\r\n        }\r\n    }\r\n\r\n    /**\r\n\t * Start the RTP session\r\n\t */\r\n\tpublic void startSession() {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.info(\"Start the session\");\r\n\t\t}\r\n\r\n\t\t// Start the media processor\r\n\t\tif (processor != null) {\r\n\t\t\tprocessor.startProcessing();\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Stop the RTP session\r\n\t */\r\n\tpublic void stopSession() {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.info(\"Stop the session\");\r\n\t\t}\r\n\r\n\t\t// Stop the media processor\r\n\t\tif (processor != null) 
{\r\n\t\t\tprocessor.stopProcessing();\r\n\t\t}\r\n\t}\n\n    /**\n     * Returns the RTP input stream\n     *\n     * @return RTP input stream\n     */\n    public RtpInputStream getInputStream() {\n        return inputStream;\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/Processor.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp;\r\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorInputStream;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\n/**\n * Media processor. 
A processor receives an input stream, use a codec chain\n * to filter the data before to send it to the output stream.\n *\n * @author jexa7410\n */\r\npublic class Processor extends Thread {\r\n\t/**\r\n\t * Processor input stream\r\n\t */\r\n\tprivate ProcessorInputStream inputStream;\r\n\r\n\t/**\r\n\t * Processor output stream\r\n\t */\r\n\tprivate ProcessorOutputStream outputStream;\r\n\r\n\t/**\r\n\t * Codec chain\r\n\t */\r\n\tprivate CodecChain codecChain;\r\n\r\n\t/**\r\n\t * Processor status flag\r\n\t */\r\n\tprivate boolean interrupted = false;\r\n\n    /**\n     * bigger Sequence Number\n     */\n    private long bigSeqNum = 0;\n\n    /**\r\n     * The logger\r\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n    /**\n     * Constructor\n     *\n     * @param inputStream Input stream\n     * @param outputStream Output stream\n     * @param codecs List of codecs\n     */\r\n\tpublic Processor(ProcessorInputStream inputStream, ProcessorOutputStream outputStream, Codec[] codecs) {\r\n        super();\n\r\n\t\tthis.inputStream = inputStream;\r\n        this.outputStream = outputStream;\r\n\r\n\t\t// Create the codec chain\r\n\t\tcodecChain = new CodecChain(codecs, outputStream);\r\n\r\n    \tif (logger.isActivated()) {\r\n    \t\tlogger.debug(\"Media processor created\");\r\n        }\r\n\t}\r\n\r\n\t/**\r\n\t * Start processing\r\n\t */\r\n\tpublic void startProcessing() {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Start media processor\");\r\n\t\t}\r\n\t\tinterrupted = false;\n        bigSeqNum = 0;\r\n        start();\r\n\t}\r\n\r\n\t/**\r\n\t * Stop processing\r\n\t */\r\n\tpublic void stopProcessing() {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Stop media processor\");\r\n\t\t}\r\n\t\tinterrupted = true;\r\n\r\n\t\t// Close streams\r\n\t\toutputStream.close();\r\n\t\tinputStream.close();\r\n\t}\r\n\n\t/**\r\n\t * Background processing\r\n\t */\r\n\tpublic void run() 
{\r\n\t\ttry {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Processor processing is started\");\r\n\t\t\t}\r\n\n\t\t\t// Start processing\r\n\t\t\twhile (!interrupted) {\r\n\t\t\t\t// Read data from the input stream\r\n\t\t\t\tBuffer inBuffer = inputStream.read();\r\n\t\t\t\tif (inBuffer == null) {\r\n\t\t\t\t\tinterrupted = true;\r\n\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\tlogger.debug(\"Processing terminated: null data received\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\n\n                // Drop the old packet\n                long seqNum = inBuffer.getSequenceNumber();\n                \n                if (seqNum + 3 > bigSeqNum) {\n                \t\n                    /*\n                     * don't send a packet twice\n                     * with in band SPS/PPS parameters this will break processing otherwise \n                     */\n                \tif (seqNum==bigSeqNum)\n                \t\tcontinue; \n                \t\t\n                    if (seqNum > bigSeqNum) {\n                        bigSeqNum = seqNum;\n                    }\n\n                    // Codec chain processing\n                    int result = codecChain.process(inBuffer);\n                    if ((result != Codec.BUFFER_PROCESSED_OK)\n                            && (result != Codec.OUTPUT_BUFFER_NOT_FILLED)) {\n                        interrupted = true;\n                        if (logger.isActivated()) {\n                            logger.error(\"Codec chain processing error: \" + result);\n                        }\n                        break;\n                    }\n                }\r\n\t\t\t}\r\n\t\t} catch (Exception e) {\r\n\t\t\tif (!interrupted) {\r\n\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\tlogger.error(\"Processor error\", e);\r\n\t\t\t\t}\r\n\t\t\t} else {\r\n\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\tlogger.debug(\"Processor processing has been terminated\");\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\n\n    
/**\n     * Returns the input stream\n     *\n     * @return Stream\n     */\r\n\tpublic ProcessorInputStream getInputStream() {\r\n\t\treturn inputStream;\r\n\t}\n\n    /**\n     * Returns the output stream\n     *\n     * @return Stream\n     */\r\n\tpublic ProcessorOutputStream getOutputStream() {\r\n\t\treturn outputStream;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/RtpException.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp;\r\n\r\n/**\r\n * RTP exception\r\n * \r\n * @author JM. Auffret\r\n */\r\npublic class RtpException extends java.lang.Exception {\r\n\tstatic final long serialVersionUID = 1L;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t *\r\n\t * @param error Error message\r\n\t */\r\n\tpublic RtpException(String error) {\r\n\t\tsuper(error);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/Codec.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\r\n\r\n/**\r\n * Abstract codec\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class Codec {\r\n\r\n\t/**\r\n\t * The input buffer was converted successfully to output\r\n\t */\r\n\tpublic static final int BUFFER_PROCESSED_OK = 0;\r\n\r\n\t/**\r\n\t * The input buffer could not be handled\r\n\t */\r\n\tpublic static final int BUFFER_PROCESSED_FAILED = 1 << 0;\r\n\r\n\t/**\r\n\t * The input buffer chunk was not fully consumed\r\n\t */\r\n\tpublic static final int INPUT_BUFFER_NOT_CONSUMED = 1 << 1;\r\n\r\n\t/**\r\n\t * The output buffer chunk was not filled\r\n\t */\r\n\tpublic static final int OUTPUT_BUFFER_NOT_FILLED = 1 << 2;\r\n\r\n\t/**\r\n\t * Input format\r\n\t */\r\n\tprivate Format inputFormat;\r\n\r\n\t/**\r\n\t * Ouput format\r\n\t */\r\n\tprivate Format outputFormat;\r\n\r\n\t/**\r\n\t * Set the input format\r\n\t * \r\n\t * @param input Input format\r\n\t * @return New format\r\n\t */\r\n\tpublic Format setInputFormat(Format input) 
{\r\n\t\tinputFormat = input;\r\n\t\treturn input;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the output format\r\n\t * \r\n\t * @param output Output format\r\n\t * @return New format\r\n\t */\r\n\tpublic Format setOutputFormat(Format output) {\r\n\t\toutputFormat = output;\r\n\t\treturn output;\r\n\t}\r\n\r\n\t/**\r\n\t * Return the input format\r\n\t * \r\n\t * @return Format\r\n\t */\r\n\tpublic Format getInputFormat() {\r\n\t\treturn inputFormat;\r\n\t}\r\n\r\n\t/**\r\n\t * Return the output format\r\n\t * \r\n\t * @return Format\r\n\t */\r\n\tpublic Format getOutputFormat() {\r\n\t\treturn outputFormat;\r\n\t}\r\n\r\n\t/**\r\n\t * Reset the codec\r\n\t */\r\n\tpublic void reset() {\r\n\t}\r\n\r\n\t/**\r\n\t * Open the codec\r\n\t */\r\n\tpublic void open() {\r\n\t}\r\n\r\n\t/**\r\n\t * Close the codec\r\n\t */\r\n\tpublic void close() {\r\n\t}\r\n\r\n\t/**\r\n\t * Test if it's the end of media\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tprotected boolean isEOM(Buffer inputBuffer) {\r\n\t\treturn inputBuffer.isEOM();\r\n\t}\r\n\r\n\t/**\r\n\t * Propagate EOM to the output buffer\r\n\t * \r\n\t * @param outputBuffer Output buffer\r\n\t */\r\n\tprotected void propagateEOM(Buffer outputBuffer) {\r\n\t\tupdateOutput(outputBuffer, getOutputFormat(), 0, 0);\r\n\t\toutputBuffer.setEOM(true);\r\n\t}\r\n\r\n\t/**\r\n\t * Update the output buffer information\r\n\t * \r\n\t * @param outputBuffer Output buffer\r\n\t * @param format Output format\r\n\t * @param length Output length\r\n\t * @param offset Output offset\r\n\t */\r\n\tprotected void updateOutput(Buffer outputBuffer, Format format, int length,\r\n\t\t\tint offset) {\r\n\t\toutputBuffer.setFormat(format);\r\n\t\toutputBuffer.setLength(length);\r\n\t\toutputBuffer.setOffset(offset);\r\n\t}\r\n\r\n\t/**\r\n\t * Check the input buffer\r\n\t * \r\n\t * @param inputBuffer Input buffer\r\n\t * @return Boolean\r\n\t */\r\n\tprotected boolean checkInputBuffer(Buffer inputBuffer) {\r\n\t\tboolean fError = 
!isEOM(inputBuffer)\r\n\t\t\t\t&& (inputBuffer == null || inputBuffer.getFormat() == null);\r\n\t\treturn !fError;\r\n\t}\r\n\r\n\t/**\r\n\t * Validate that the Buffer's data size is at least newSize\r\n\t * \r\n\t * @return Array with sufficient capacity\r\n\t */\r\n\tprotected byte[] validateByteArraySize(Buffer buffer, int newSize) {\r\n\t\tbyte[] typedArray = (byte[]) buffer.getData();\r\n\t\tif (typedArray != null) {\r\n\t\t\tif (typedArray.length >= newSize) {\r\n\t\t\t\treturn typedArray;\r\n\t\t\t}\r\n\r\n\t\t\tbyte[] tempArray = new byte[newSize];\r\n\t\t\tSystem.arraycopy(typedArray, 0, tempArray, 0, typedArray.length);\r\n\t\t\ttypedArray = tempArray;\r\n\t\t} else {\r\n\t\t\ttypedArray = new byte[newSize];\r\n\t\t}\r\n\r\n\t\tbuffer.setData(typedArray);\r\n\t\treturn typedArray;\r\n\t}\r\n\r\n\t/**\r\n\t * Performs the media processing defined by this codec\r\n\t * \r\n\t * @param input The buffer that contains the media data to be processed\r\n\t * @param output The buffer in which to store the processed media data\r\n\t * @return Processing result\r\n\t */\r\n\tpublic abstract int process(Buffer input, Buffer output);\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/VideoCodec.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\r\n\r\n/**\r\n * Video codec abstract class\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class VideoCodec extends Codec {\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/H263Config.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263;\r\n\r\n/**\r\n * Default H263 settings.\r\n *\r\n * @author hlxn7157\r\n */\r\npublic class H263Config {\r\n    /**\r\n     * H263 Codec Name\r\n     */\r\n    public final static String CODEC_NAME = \"h263-2000\";\r\n\r\n    /**\r\n     * Default clock rate\r\n     */\r\n    public final static int CLOCK_RATE = 90000;\r\n\r\n    /**\r\n     * Default codec params\r\n     */\r\n    public final static String CODEC_PARAMS = \"profile=0;level=10\";\r\n//    public final static String CODEC_PARAMS = \"profile=0;level=20\";\r\n\r\n    /**\r\n     * Default video width\r\n     */\r\n//    public final static int VIDEO_WIDTH = 176;\r\n    public final static int VIDEO_WIDTH = 352;\r\n\r\n    /**\r\n     * Default video height\r\n     */\r\n//    public final static int VIDEO_HEIGHT = 144;\r\n    public final static int VIDEO_HEIGHT = 288;\r\n\r\n    /**\r\n     * Default video frame rate\r\n     */\r\n    public final static int FRAME_RATE = 12;\r\n\r\n    /**\r\n     * Default video bit rate\r\n     */\r\n    public final static int BIT_RATE = 
128000;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/H263RtpHeader.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263;\r\n\r\n/**\r\n * RFC 4629: a special header is added to each H263+ packet that\r\n * immediately follows the RTP header:\r\n *\r\n *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\r\n *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n *  |   RR    |P|V|   PLEN    |PEBIT|\r\n *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n */\r\npublic class H263RtpHeader{\r\n\r\n\tpublic int HEADER_SIZE = 2;\t\r\n\t\r\n\tpublic byte RR;\r\n\tpublic boolean P;\r\n\tpublic boolean V;\r\n\tpublic int PLEN;\r\n\tpublic int PEBIT;\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param RR\r\n\t * @param P\r\n\t * @param V\r\n\t * @param PLEN\r\n\t * @param PEBIT\r\n\t */\r\n\tpublic H263RtpHeader(final byte RR, final boolean P, final boolean V, final int PLEN, final int PEBIT){\r\n\t\tthis.RR = RR;\r\n\t\tthis.P = P;\r\n\t\tthis.V = V;\r\n\t\tthis.PLEN = PLEN;\r\n\t\tthis.PEBIT = PEBIT;\r\n\t}\t\t\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param data\r\n\t */\r\n\tpublic H263RtpHeader(byte[] data){\r\n\t\tRR = (byte)(data[0]>>3);\r\n\t\tP = (data[0]&0x4) != 0;\r\n\t\tV = (data[0]&0x2) != 0;\r\n\t\tPLEN = 
((data[0]&0x1)<<5)|(data[1]>>3);\r\n\t\tPEBIT = data[1]&0x7;\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/JavaDepacketizer.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.VideoCodec;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\n\r\n/**\r\n * Reassembles H263+ RTP packets into H263+ frames, as per RFC 4629\r\n * Complete frames are sent to decoder once reassembled\r\n */\r\npublic class JavaDepacketizer extends VideoCodec {\r\n\r\n\t/**\r\n\t * Collection of frameAssemblers.\r\n\t * Allows the construction of several frames if incoming packets are ouf of order\r\n\t */\r\n\tFrameAssemblerCollection assemblersCollection = new FrameAssemblerCollection();\r\n\t\r\n\t/**\r\n\t * Max frame size to give for next module, as some decoder have frame size limits\r\n\t */\r\n\tprivate static int MAX_H263P_FRAME_SIZE = 8192;\r\n\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic JavaDepacketizer(){\r\n\t}\r\n\t\r\n\t/**\r\n\t * Performs the media processing defined by this codec\r\n\t * \r\n\t * @param input The buffer that contains the media data to be processed\r\n\t * @param output The buffer in which to store 
the processed media data\r\n\t * @return Processing result\r\n\t */\r\n\tpublic int process(Buffer input, Buffer output){\r\n\t\tif (!input.isDiscard())\t{\t\t\t\r\n\t\t\tassemblersCollection.put(input);\r\n\t\t\t\r\n\t\t\tif (assemblersCollection.getLastActiveAssembler().complete()){\r\n\t\t\t\tassemblersCollection.getLastActiveAssembler().copyToBuffer(output);\r\n\t\t\t\tassemblersCollection.removeOldestThan(input.getTimeStamp());\r\n\t\t\t\treturn BUFFER_PROCESSED_OK;\r\n\t\t\t}else{\t\r\n\t\t\t\toutput.setDiscard(true);\r\n\t\t\t\treturn OUTPUT_BUFFER_NOT_FILLED;\r\n\t\t\t}\t\t\r\n\t\t}else{\r\n\t\t\toutput.setDiscard(true);\r\n\t\t\treturn OUTPUT_BUFFER_NOT_FILLED;\r\n\t\t}\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Used to assemble fragments with the same timestamp into a single frame.\r\n\t */\r\n\tstatic class FrameAssembler{\r\n\t\tprivate boolean rtpMarker = false; // have we received the RTP marker that signifies the end of a frame?\r\n\t\tprivate byte[] reassembledData = null;\r\n\t\tprivate long timeStamp = -1;\r\n\t\tprivate Format format = null;\r\n\r\n\t\t/**\r\n\t\t * Add the buffer (which contains a fragment) to the assembler.\r\n\t\t */\r\n\t\tpublic void put(Buffer buffer){\r\n\t\t\t// Read rtpMarker\r\n\t\t\trtpMarker = (buffer.isRTPMarkerSet());\r\n\r\n\t\t\tif (buffer.getLength() <= 2){\r\n\t\t\t\treturn; // no actual data in buffer, no need to keep.  
Typically happens when RTP marker is set.\r\n\t\t\t}\r\n\t\t\t\r\n\t\t\tbyte[] currentRtpPacketData = ((byte[])buffer.getData());\r\n\t\t    H263RtpHeader h263PRtpHeader = new H263RtpHeader(currentRtpPacketData);\r\n\t\t    \r\n\t\t    int headerSize = h263PRtpHeader.HEADER_SIZE;\r\n\t\t    if (h263PRtpHeader.V) {\r\n        \t    // There's an extra VRC byte at the end of the header:\r\n        \t    ++headerSize;\r\n        \t  }\r\n        \t  \r\n        \t  if (h263PRtpHeader.PLEN > 0) {\r\n        \t    // There's an extra picture header at the end:\r\n        \t\t  headerSize += h263PRtpHeader.PLEN;\r\n        \t  }\r\n\r\n        \t  if (h263PRtpHeader.P) {\r\n        \t    // Prepend two zero bytes to the start of the payload proper\r\n        \t    // Hack: Do this by shrinking header by 2 bytes\r\n        \t\theaderSize -= 2;\r\n        \t    currentRtpPacketData[headerSize] = 0x00;\r\n        \t    currentRtpPacketData[headerSize+1] = 0x00;\r\n        \t  }\r\n\r\n        \t  if (reassembledData == null){\r\n        \t\t  // First packet\r\n        \t\t  timeStamp = buffer.getTimeStamp();\r\n        \t\t  format = buffer.getFormat();\r\n        \t\t\r\n        \t\t  // Copy packet data to reassembledData\r\n        \t\t  reassembledData = new byte[currentRtpPacketData.length-headerSize];\r\n        \t\t  System.arraycopy(currentRtpPacketData, headerSize, reassembledData, 0, currentRtpPacketData.length-headerSize);\r\n        \t  } else {\r\n    \t\t\t// Concatenate new data to reassembledData\r\n        \t\tbyte[] data = new byte[reassembledData.length+buffer.getLength()];\r\n       \t\t\tSystem.arraycopy(reassembledData, 0, data, 0, reassembledData.length);\r\n       \t\t\tSystem.arraycopy(currentRtpPacketData, headerSize, data, reassembledData.length, buffer.getLength());\r\n    \t\t\t// Copy data to reassembledData\r\n    \t\t\treassembledData = new byte[data.length];\r\n    \t\t\tSystem.arraycopy(data, 0, reassembledData, 0, data.length);\r\n      
\t\t}        \t  \r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Is the frame complete?\r\n\t\t */\r\n\t\tpublic boolean complete(){\t\r\n\t\t\tif (!rtpMarker){\r\n\t\t\t\treturn false;\t// need an rtp marker to signify end\r\n\t\t\t}\r\n\t\t\tif (reassembledData.length <= 0){\r\n\t\t\t\treturn false;\t// need data beyond the header\r\n\t\t\t}\r\n\t\t\t// TODO: if some of the last ones come in after the marker, there will be blank squares in the lower right.\r\n\t\t\treturn true;\r\n\t\t}\t\r\n\t\t\r\n\t\t/**\r\n\t\t * Assumes that complete() has been called and returns true.\r\n\t\t */\r\n\t\tprivate void copyToBuffer(Buffer bDest){\r\n\t\t\tif (!rtpMarker)\r\n\t\t\t\tthrow new IllegalStateException();\r\n\t\t\tif (reassembledData.length <= 0)\r\n\t\t\t\tthrow new IllegalStateException();\t\r\n\t\t\t\r\n\t\t\tif (reassembledData.length<=MAX_H263P_FRAME_SIZE){\r\n\t\t\t\t// If the frame data can be processed by native module, ie reassembled frame size not too big \r\n\t\t\t\t// Set buffer\r\n\t\t\t\tbDest.setData(reassembledData);\r\n\t\t\t\tbDest.setLength(reassembledData.length);\r\n\t\t\t\tbDest.setOffset(0);\r\n\t\t\t\tbDest.setTimeStamp(timeStamp);\r\n\t\t\t\tbDest.setFormat(format);\r\n\t\t\t\tbDest.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME);\r\n\t\t\t}\r\n\t\t\t// Set reassembledData to null\r\n\t\t\treassembledData = null;\r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Get timestamp\r\n\t\t * \r\n\t\t * @return long\r\n\t\t */\r\n\t\tpublic long getTimeStamp(){\r\n\t\t\treturn timeStamp;\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Used to manage different timestamps, as packets could be coming not in order.\r\n\t * \r\n\t * Data is an array of FrameAssemblers, sorted by timestamps (oldest is first, newest is last)\r\n\t */\r\n\tstatic class FrameAssemblerCollection{\r\n\t\tfinal static int NUMBER_OF_ASSEMBLERS = 5;\r\n\t\tprivate FrameAssembler[] assemblers = new FrameAssembler[NUMBER_OF_ASSEMBLERS];\r\n\t\tprivate int activeAssembler = 0;\r\n\t\tprivate int 
numberOfAssemblers = 0;\r\n\t\t\r\n\t\t/**\r\n\t\t * Add the buffer (which contains a fragment) to the right assembler.  \r\n\t\t */\r\n\t\tpublic void put(Buffer buffer){\r\n\t\t\tactiveAssembler = getAssembler(buffer.getTimeStamp());\r\n\t\t\tassemblers[activeAssembler].put(buffer);\r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Get the active frame assembler\r\n\t\t * \r\n\t\t * @return frameAssembler Last active assembler\r\n\t\t */\r\n\t\tpublic FrameAssembler getLastActiveAssembler(){\r\n\t\t\treturn assemblers[activeAssembler];\r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Create a new frame assembler for given timeStamp\r\n\t\t * \r\n\t\t * @param timeStamp\r\n\t\t * @return assembler number Position of the assembler in the collection\r\n\t\t */\r\n\t\tpublic int createNewAssembler(long timeStamp){\r\n\t\t\tint spot = -1;\r\n\t\t\tif (numberOfAssemblers< NUMBER_OF_ASSEMBLERS-1){\r\n\t\t\t\t// If there's enough space left to create a new assembler\r\n\t\t\t\t// We search its spot\r\n\t\t\t\tfor (int i=0;i<numberOfAssemblers;i++){\r\n\t\t\t\t\tif (timeStamp <assemblers[i].getTimeStamp()){\r\n\t\t\t\t\t\tspot = i;\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t\tif (spot ==-1){\r\n\t\t\t\t\tspot = numberOfAssemblers;\r\n\t\t\t\t}\r\n\t\t\t\tnumberOfAssemblers++;\r\n\t\t\t\t// Decale all assemblers with newest timeStamp to the right\r\n\t\t\t\tfor (int i=numberOfAssemblers;i>spot;i--){\r\n\t\t\t\t\tassemblers[i] = assemblers[i-1];\r\n\t\t\t\t}\r\n\t\t\t\tassemblers[spot] = new FrameAssembler();\r\n\t\t\t}else{\r\n\t\t\t\t// Not enough space, we destroy the oldest assembler\r\n\t\t\t\tfor (int i=1;i<NUMBER_OF_ASSEMBLERS;i++){\r\n\t\t\t\t\tassemblers[i-1]=assemblers[i];\r\n\t\t\t\t}\r\n\t\t\t\t// Last spot is for the new assembler\r\n\t\t\t\tassemblers[NUMBER_OF_ASSEMBLERS-1] = new FrameAssembler();\r\n\t\t\t\tspot = numberOfAssemblers;\r\n\t\t\t}\r\n\t\t\treturn spot;\r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Get the assembler used for given timestamp\r\n\t\t *\r\n\t\t * @param 
timeStamp\r\n\t\t * @return Index of the FrameAssembler associated with timeStamp\r\n\t\t */\r\n\t\tpublic int getAssembler(long timeStamp){\r\n\t\t\tint assemblerNumber = -1;\r\n\t\t\tfor (int i=0; i<numberOfAssemblers; i++){\r\n\t\t\t\tif (assemblers[i].getTimeStamp() == timeStamp){\r\n\t\t\t\t\tassemblerNumber = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tif (assemblerNumber==-1){\r\n\t\t\t\t// Given timestamp never used, we create a new assembler\r\n\t\t\t\tassemblerNumber = createNewAssembler(timeStamp);\r\n\t\t\t}\r\n\t\t\treturn assemblerNumber;\r\n\t\t}\r\n\t\t\r\n\t\t/**\r\n\t\t * Remove FrameAssemblers older than the given timeStamp \r\n\t\t * (if given timeStamp has been rendered, then older ones are of no more use)\r\n\t\t * This also removes the given timeStamp\r\n\t\t * \r\n\t\t * @param timeStamp\r\n\t\t */\r\n\t\tpublic void removeOldestThan(long timeStamp){\r\n\t\t\t// Find spot from which to remove\r\n\t\t\tint spot = numberOfAssemblers-1;\r\n\t\t\tfor (int i=0;i<numberOfAssemblers;i++){\r\n\t\t\t\tif (timeStamp <=assemblers[i].getTimeStamp()){\r\n\t\t\t\t\tspot = i;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\t// remove all assemblers with oldest timeStamp to the left\r\n\t\t\tfor (int i=numberOfAssemblers;i>spot;i--){\r\n\t\t\t\tassemblers[i-1] = assemblers[i];\r\n\t\t\t}\r\n\t\t\tnumberOfAssemblers -=spot+1;\r\n\t\t}\r\n\t}\r\n}\r\n\r\n\t\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/JavaPacketizer.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.VideoCodec;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\r\n\r\n/**\r\n * Reassembles H263+ RTP packets into H263+ frames, as per RFC 4629\r\n */\r\npublic class JavaPacketizer extends VideoCodec {\r\n\t/**\r\n\t * Because packets can come out of order, it is possible that some packets for a newer frame\r\n\t * may arrive while an older frame is still incomplete.  
However, in the case where we get nothing\r\n\t * but incomplete frames, we don't want to keep all of them around forever.\r\n\t */\r\n\tpublic JavaPacketizer(){\r\n\t}\r\n\t\r\n\tpublic int process(Buffer input, Buffer output){\t\t\r\n\t\tif (!input.isDiscard())\t{\t\t\t\r\n\t\t\t// Add H263+ RTP header\r\n\t\t\t/*\t      0                   1\r\n\t\t\t\t      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5\r\n\t\t\t\t     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n\t\t\t\t     |   RR    |P|V|   PLEN    |PEBIT|\t\t+ 2 null bytes\r\n\t\t\t\t     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+\r\n\t\t\t\t\t----------------------------------------\r\n\t\t\t\t\t|0000 0100|0000 0000|0000 0000|0000 0000|\r\n\t\t\t\t\t----------------------------------------\r\n\t\t\t\t\tOnly bit set is P = 1\r\n\t\t\t*/\t\t\r\n\t\t\tbyte h263header[] = new byte[2];\r\n\t\t\th263header[0]= 0x04;\r\n\t\t\th263header[1]= 0x00;\r\n\t\t\t\r\n\t\t\tbyte[] bufferData = (byte[])input.getData();\r\n\t\t\tbyte data[] = new byte[bufferData.length+h263header.length];\r\n\t\t\t// write h263 payload\r\n\t\t\tSystem.arraycopy(h263header, 0, data, 0, h263header.length);\r\n\t\t\t// Write data\r\n\t\t\tSystem.arraycopy(bufferData, 0, data, h263header.length, bufferData.length);\r\n\t\t\t\t\t\t\r\n\t\t\tif (data.length > 0){\r\n\t\t\t\t// Copy to buffer\r\n\t\t\t\toutput.setFormat(input.getFormat());\r\n\t\t\t\toutput.setData(data);\r\n\t\t\t\toutput.setLength(data.length);\r\n\t\t\t\toutput.setOffset(0);\r\n\t\t\t\toutput.setTimeStamp(input.getTimeStamp());\r\n\t\t\t\toutput.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME);\r\n\t\t\t}\r\n\t\t\treturn BUFFER_PROCESSED_OK;\r\n\t\t}else{\r\n\t\t\toutput.setDiscard(true);\r\n\t\t\treturn OUTPUT_BUFFER_NOT_FILLED;\r\n\t\t}\t\t\r\n\t}\r\n\t\r\n}\r\n\r\n\t\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/NativeH263Decoder.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder;\r\n\r\n\r\n// Referenced classes of package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder:\r\n//            VideoSample\r\n\r\npublic class NativeH263Decoder\r\n{\r\n\r\n    public NativeH263Decoder()\r\n    {\r\n    }\r\n\r\n    public static native int InitDecoder(int i, int j);\r\n\r\n    public static native int DeinitDecoder();\r\n\r\n    public static native int DecodeAndConvert(byte abyte0[], int ai[], long l);\r\n\r\n    public static native int InitParser(String s);\r\n\r\n    public static native int DeinitParser();\r\n\r\n    public static native int getVideoLength();\r\n\r\n    public static native int getVideoWidth();\r\n\r\n    public static native int getVideoHeight();\r\n\r\n    public static native String getVideoCoding();\r\n\r\n    public static native VideoSample getVideoSample(int ai[]);\r\n\r\n    static \r\n    {\r\n        String libname = \"H263Decoder\";\r\n        try\r\n        {\r\n            System.loadLibrary(libname);\r\n        }\r\n        catch(Exception exception) { }\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/decoder/VideoSample.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder;\r\n\r\n\r\npublic class VideoSample\r\n{\r\n\r\n    public byte data[];\r\n    public int timestamp;\r\n\r\n    public VideoSample(byte data[], int timestamp)\r\n    {\r\n        this.data = null;\r\n        this.timestamp = 0;\r\n        this.data = data;\r\n        this.timestamp = timestamp;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/encoder/NativeH263Encoder.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder;\r\n\r\n\r\n// Referenced classes of package com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder:\r\n//            NativeH263EncoderParams\r\n\r\npublic class NativeH263Encoder\r\n{\r\n\r\n    public NativeH263Encoder()\r\n    {\r\n    }\r\n\r\n    public static native int InitEncoder(NativeH263EncoderParams nativeh263encoderparams);\r\n\r\n    public static native byte[] EncodeFrame(byte abyte0[], long l);\r\n\r\n    public static native int DeinitEncoder();\r\n\r\n    static \r\n    {\r\n        String libname = \"H263Encoder\";\r\n        try\r\n        {\r\n            System.loadLibrary(libname);\r\n        }\r\n        catch(UnsatisfiedLinkError unsatisfiedlinkerror) { }\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h263/encoder/NativeH263EncoderParams.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder;\r\n\r\n\r\npublic class NativeH263EncoderParams\r\n{\r\n\r\n    public static final int SIMPLE_PROFILE_LEVEL0 = 0;\r\n    public static final int SIMPLE_PROFILE_LEVEL1 = 1;\r\n    public static final int SIMPLE_PROFILE_LEVEL2 = 2;\r\n    public static final int SIMPLE_PROFILE_LEVEL3 = 3;\r\n    public static final int CORE_PROFILE_LEVEL1 = 4;\r\n    public static final int CORE_PROFILE_LEVEL2 = 5;\r\n    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL0 = 6;\r\n    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL1 = 7;\r\n    public static final int SIMPLE_SCALABLE_PROFILE_LEVEL2 = 8;\r\n    public static final int CORE_SCALABLE_PROFILE_LEVEL1 = 10;\r\n    public static final int CORE_SCALABLE_PROFILE_LEVEL2 = 11;\r\n    public static final int CORE_SCALABLE_PROFILE_LEVEL3 = 12;\r\n    public static final int SHORT_HEADER = 0;\r\n    public static final int SHORT_HEADER_WITH_ERR_RES = 1;\r\n    public static final int H263_MODE = 2;\r\n    public static final int H263_MODE_WITH_ERR_RES = 3;\r\n    public static final int DATA_PARTITIONING_MODE = 4;\r\n    
public static final int COMBINE_MODE_NO_ERR_RES = 5;\r\n    public static final int COMBINE_MODE_WITH_ERR_RES = 6;\r\n    public static final int CONSTANT_Q = 0;\r\n    public static final int CBR_1 = 1;\r\n    public static final int VBR_1 = 2;\r\n    public static final int CBR_2 = 3;\r\n    public static final int VBR_2 = 4;\r\n    public static final int CBR_LOWDELAY = 5;\r\n    private int encMode;\r\n    private int packetSize;\r\n    private int profile_level;\r\n    private boolean rvlcEnable;\r\n    private int gobHeaderInterval;\r\n    private int numLayers;\r\n    private int timeIncRes;\r\n    private int tickPerSrc;\r\n    private int encHeight;\r\n    private int encWidth;\r\n    private float encFrameRate;\r\n    private int bitRate;\r\n    private int iQuant;\r\n    private int pQuant;\r\n    private int quantType;\r\n    private int rcType;\r\n    private float vbvDelay;\r\n    private boolean noFrameSkipped;\r\n    private int intraPeriod;\r\n    private int numIntraMB;\r\n    private boolean sceneDetect;\r\n    private int searchRange;\r\n    private boolean mv8x8Enable;\r\n    private int intraDCVlcTh;\r\n    private boolean useACPred;\r\n\r\n    public NativeH263EncoderParams()\r\n    {\r\n        encMode = 2;\r\n        packetSize = 1024;\r\n        profile_level = 3;\r\n        rvlcEnable = false;\r\n        gobHeaderInterval = 0;\r\n        numLayers = 1;\r\n        timeIncRes = 1000;\r\n        tickPerSrc = 125;\r\n        encHeight = 144;\r\n        encWidth = 176;\r\n        encFrameRate = 8F;\r\n        bitRate = 64000;\r\n        iQuant = 15;\r\n        pQuant = 12;\r\n        quantType = 0;\r\n        rcType = 1;\r\n        vbvDelay = 1.5F;\r\n        noFrameSkipped = false;\r\n        intraPeriod = -1;\r\n        numIntraMB = 0;\r\n        sceneDetect = false;\r\n        searchRange = 4;\r\n        mv8x8Enable = true;\r\n        intraDCVlcTh = 0;\r\n        useACPred = false;\r\n    }\r\n\r\n    public int getEncMode()\r\n    {\r\n    
    return encMode;\r\n    }\r\n\r\n    public int getPacketSize()\r\n    {\r\n        return packetSize;\r\n    }\r\n\r\n    public int getProfile_level()\r\n    {\r\n        return profile_level;\r\n    }\r\n\r\n    public boolean isRvlcEnable()\r\n    {\r\n        return rvlcEnable;\r\n    }\r\n\r\n    public int getGobHeaderInterval()\r\n    {\r\n        return gobHeaderInterval;\r\n    }\r\n\r\n    public int getNumLayers()\r\n    {\r\n        return numLayers;\r\n    }\r\n\r\n    public int getTimeIncRes()\r\n    {\r\n        return timeIncRes;\r\n    }\r\n\r\n    public int getTickPerSrc()\r\n    {\r\n        return tickPerSrc;\r\n    }\r\n\r\n    public int getEncHeight()\r\n    {\r\n        return encHeight;\r\n    }\r\n\r\n    public int getEncWidth()\r\n    {\r\n        return encWidth;\r\n    }\r\n\r\n    public float getEncFrameRate()\r\n    {\r\n        return encFrameRate;\r\n    }\r\n\r\n    public int getBitRate()\r\n    {\r\n        return bitRate;\r\n    }\r\n\r\n    public int getIQuant()\r\n    {\r\n        return iQuant;\r\n    }\r\n\r\n    public int getPQuant()\r\n    {\r\n        return pQuant;\r\n    }\r\n\r\n    public int getQuantType()\r\n    {\r\n        return quantType;\r\n    }\r\n\r\n    public int getRcType()\r\n    {\r\n        return rcType;\r\n    }\r\n\r\n    public boolean isNoFrameSkipped()\r\n    {\r\n        return noFrameSkipped;\r\n    }\r\n\r\n    public int getIntraPeriod()\r\n    {\r\n        return intraPeriod;\r\n    }\r\n\r\n    public int getNumIntraMB()\r\n    {\r\n        return numIntraMB;\r\n    }\r\n\r\n    public boolean isSceneDetect()\r\n    {\r\n        return sceneDetect;\r\n    }\r\n\r\n    public int getSearchRange()\r\n    {\r\n        return searchRange;\r\n    }\r\n\r\n    public boolean isMv8x8Enable()\r\n    {\r\n        return mv8x8Enable;\r\n    }\r\n\r\n    public int getIntraDCVlcTh()\r\n    {\r\n        return intraDCVlcTh;\r\n    }\r\n\r\n    public boolean isUseACPred()\r\n    {\r\n        
return useACPred;\r\n    }\r\n\r\n    public void setEncMode(int encMode)\r\n    {\r\n        this.encMode = encMode;\r\n    }\r\n\r\n    public void setPacketSize(int packetSize)\r\n    {\r\n        this.packetSize = packetSize;\r\n    }\r\n\r\n    public void setProfile_level(int profile_level)\r\n    {\r\n        this.profile_level = profile_level;\r\n    }\r\n\r\n    public void setRvlcEnable(boolean rvlcEnable)\r\n    {\r\n        this.rvlcEnable = rvlcEnable;\r\n    }\r\n\r\n    public void setGobHeaderInterval(int gobHeaderInterval)\r\n    {\r\n        this.gobHeaderInterval = gobHeaderInterval;\r\n    }\r\n\r\n    public void setNumLayers(int numLayers)\r\n    {\r\n        this.numLayers = numLayers;\r\n    }\r\n\r\n    public void setTimeIncRes(int timeIncRes)\r\n    {\r\n        this.timeIncRes = timeIncRes;\r\n    }\r\n\r\n    public void setTickPerSrc(int tickPerSrc)\r\n    {\r\n        this.tickPerSrc = tickPerSrc;\r\n    }\r\n\r\n    public void setEncHeight(int encHeight)\r\n    {\r\n        this.encHeight = encHeight;\r\n    }\r\n\r\n    public void setEncWidth(int encWidth)\r\n    {\r\n        this.encWidth = encWidth;\r\n    }\r\n\r\n    public void setEncFrameRate(float encFrameRate)\r\n    {\r\n        this.encFrameRate = encFrameRate;\r\n    }\r\n\r\n    public void setBitRate(int bitRate)\r\n    {\r\n        this.bitRate = bitRate;\r\n    }\r\n\r\n    public void setIQuant(int quant)\r\n    {\r\n        iQuant = quant;\r\n    }\r\n\r\n    public void setPQuant(int quant)\r\n    {\r\n        pQuant = quant;\r\n    }\r\n\r\n    public void setQuantType(int quantType)\r\n    {\r\n        this.quantType = quantType;\r\n    }\r\n\r\n    public void setRcType(int rcType)\r\n    {\r\n        this.rcType = rcType;\r\n    }\r\n\r\n    public void setNoFrameSkipped(boolean noFrameSkipped)\r\n    {\r\n        this.noFrameSkipped = noFrameSkipped;\r\n    }\r\n\r\n    public void setIntraPeriod(int intraPeriod)\r\n    {\r\n        this.intraPeriod = 
intraPeriod;\r\n    }\r\n\r\n    public void setNumIntraMB(int numIntraMB)\r\n    {\r\n        this.numIntraMB = numIntraMB;\r\n    }\r\n\r\n    public void setSceneDetect(boolean sceneDetect)\r\n    {\r\n        this.sceneDetect = sceneDetect;\r\n    }\r\n\r\n    public void setSearchRange(int searchRange)\r\n    {\r\n        this.searchRange = searchRange;\r\n    }\r\n\r\n    public void setMv8x8Enable(boolean mv8x8Enable)\r\n    {\r\n        this.mv8x8Enable = mv8x8Enable;\r\n    }\r\n\r\n    public void setIntraDCVlcTh(int intraDCVlcTh)\r\n    {\r\n        this.intraDCVlcTh = intraDCVlcTh;\r\n    }\r\n\r\n    public void setUseACPred(boolean useACPred)\r\n    {\r\n        this.useACPred = useACPred;\r\n    }\r\n\r\n    public float getVbvDelay()\r\n    {\r\n        return vbvDelay;\r\n    }\r\n\r\n    public void setVbvDelay(float vbvDelay)\r\n    {\r\n        this.vbvDelay = vbvDelay;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/H264Config.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264;\r\n\r\n/**\r\n * Default H264 Settings\r\n *\r\n * @author hlxn7157\r\n */\r\npublic class H264Config {\r\n    /**\r\n     * H264 Codec Name\r\n     */\r\n    public final static String CODEC_NAME = \"h264\";\r\n\r\n    /**\r\n     * Default clock rate\r\n     */\r\n    public final static int CLOCK_RATE = 90000;\r\n\r\n    /**\r\n     * Default codec params\r\n     */\r\n    public final static String CODEC_PARAMS = \"profile-level-id=42900B\";\r\n\r\n    /**\r\n     * Default video width\r\n     */\r\n//    public final static int VIDEO_WIDTH = 176;\r\n    public final static int VIDEO_WIDTH = 352;\r\n\r\n    /**\r\n     * Default video height\r\n     */\r\n//    public final static int VIDEO_HEIGHT = 144;\r\n    public final static int VIDEO_HEIGHT = 288;\r\n\r\n    /**\r\n     * Default video frame rate\r\n     */\r\n    public final static int FRAME_RATE = 15;\r\n\r\n    /**\r\n     * Default video bit rate\r\n     */\r\n//    public final static int BIT_RATE = 64000;\r\n    public final static int BIT_RATE = 384000;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/decoder/NativeH264Decoder.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.decoder;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder.VideoSample;\r\n\r\npublic class NativeH264Decoder\r\n{\r\n\r\n    public NativeH264Decoder()\r\n    {\r\n    }\r\n\r\n    public static native int InitDecoder();\r\n\r\n    public static native int DeinitDecoder();\r\n\r\n    public static synchronized native int DecodeAndConvert(byte abyte0[], int ai[]);\r\n\r\n    public static native int InitParser(String s);\r\n\r\n    public static native int DeinitParser();\r\n\r\n    public static native int getVideoLength();\r\n\r\n    public static native int getVideoWidth();\r\n\r\n    public static native int getVideoHeight();\r\n\r\n    public static native String getVideoCoding();\r\n\r\n    public static native VideoSample getVideoSample(int ai[]);\r\n\r\n    static \r\n    {\r\n        String libname = \"H264Decoder\";\r\n        try\r\n        {\r\n            System.loadLibrary(libname);\r\n        }\r\n        catch(Exception exception) { }\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/codec/video/h264/encoder/NativeH264Encoder.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.encoder;\r\n\r\n\r\npublic class NativeH264Encoder\r\n{\r\n\r\n    public NativeH264Encoder()\r\n    {\r\n    }\r\n\r\n    public static native int InitEncoder(int i, int j, int k);\r\n\r\n    public static native byte[] EncodeFrame(byte abyte0[], long l);\r\n\r\n    public static native int DeinitEncoder();\r\n\r\n    public static native int getLastEncodeStatus();\r\n\r\n    static \r\n    {\r\n        String libname = \"H264Encoder\";\r\n        try\r\n        {\r\n            System.loadLibrary(libname);\r\n        }\r\n        catch(UnsatisfiedLinkError unsatisfiedlinkerror) { }\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpAppPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.DataOutputStream;\nimport java.io.IOException;\n\r\n/**\r\n * RTCP APP packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpAppPacket extends RtcpPacket {\r\n\tpublic int ssrc;\r\n\tpublic int name;\r\n\tpublic int subtype;\r\n\r\n\tpublic RtcpAppPacket(RtcpPacket parent) {\r\n\t\tsuper(parent);\r\n\t\t\r\n\t\tsuper.type = 204;\r\n\t}\r\n\r\n\tpublic RtcpAppPacket(int ssrc, int name, int subtype, byte data[]) {\r\n\t\tthis.ssrc = ssrc;\r\n\t\tthis.name = name;\r\n\t\tthis.subtype = subtype;\r\n\t\tthis.data = data;\r\n\t\tsuper.type = 204;\r\n\r\n\t\tif ((data.length & 3) != 0) {\r\n\t\t\tthrow new IllegalArgumentException(\"Bad data length\");\r\n\t\t}\r\n\t\tif (subtype < 0 || subtype > 31) {\r\n\t\t\tthrow new IllegalArgumentException(\"Bad subtype\");\r\n\t\t} else {\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\treturn 12 + data.length;\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream out) throws IOException {\r\n\t\tout.writeByte(128 + subtype);\r\n\t\tout.writeByte(204);\r\n\t\tout.writeShort(2 + (data.length >> 
2));\r\n\t\tout.writeInt(ssrc);\r\n\t\tout.writeInt(name);\r\n\t\tout.write(data);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpByePacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\n/**\r\n * RTCP BYE packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpByePacket extends RtcpPacket {\r\n\r\n\tpublic int ssrc[];\r\n\tpublic byte reason[];\r\n\r\n\tpublic RtcpByePacket(RtcpPacket parent) {\r\n\t\tsuper(parent);\r\n\t\tsuper.type = 203;\r\n\t}\r\n\r\n\tpublic RtcpByePacket(int ssrc[], byte reason[]) {\r\n\t\tthis.ssrc = ssrc;\r\n\t\tif (reason != null) {\r\n\t\t\tthis.reason = reason;\r\n\t\t} else {\r\n\t\t\tthis.reason = new byte[0];\r\n\t\t}\r\n\t\tif (ssrc.length > 31) {\r\n\t\t\tthrow new IllegalArgumentException(\"Too many SSRCs\");\r\n\t\t} else {\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\treturn 4 + (ssrc.length << 2)\r\n\t\t\t\t+ (reason.length <= 0 ? 0 : reason.length + 4 & -4);\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream out) throws IOException {\r\n\t\tout.writeByte(128 + ssrc.length);\r\n\t\tout.writeByte(203);\r\n\t\tout.writeShort(ssrc.length\r\n\t\t\t\t+ (reason.length <= 0 ? 
0 : reason.length + 4 >> 2));\r\n\t\tfor (int i = 0; i < ssrc.length; i++) {\r\n\t\t\tout.writeInt(ssrc[i]);\r\n\t\t}\r\n\r\n\t\tif (reason.length > 0) {\r\n\t\t\tout.writeByte(reason.length);\r\n\t\t\tout.write(reason);\r\n\t\t\tfor (int i = (reason.length + 4 & -4) - reason.length - 1; i > 0; i--) {\r\n\t\t\t\tout.writeByte(0);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpCompoundPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.ByteArrayOutputStream;\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\r\n\r\n/**\r\n * RTCP compound packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpCompoundPacket extends RtcpPacket {\r\n\tpublic RtcpPacket[] packets;\r\n\r\n\tpublic RtcpCompoundPacket(Packet packet) {\r\n\t\tsuper(packet);\r\n\t\ttype = -1;\r\n\t}\r\n\r\n\tpublic RtcpCompoundPacket(RtcpPacket[] rtcppackets) {\r\n\t\tpackets = rtcppackets;\r\n\t\ttype = -1;\r\n\t}\r\n\r\n\tpublic void assemble(int i, boolean bool) {\r\n\t\tlength = i;\r\n\t\toffset = 0;\r\n\t\tByteArrayOutputStream bytearrayoutputstream = new ByteArrayOutputStream(\r\n\t\t\t\ti);\r\n\t\tDataOutputStream dataoutputstream = new DataOutputStream(\r\n\t\t\t\tbytearrayoutputstream);\r\n\t\tint i_0_;\r\n\t\ttry {\r\n\t\t\tif (bool)\r\n\t\t\t\toffset += 4;\r\n\t\t\ti_0_ = offset;\r\n\t\t\tfor (int i_1_ = 0; i_1_ < packets.length; i_1_++) {\r\n\t\t\t\ti_0_ = 
bytearrayoutputstream.size();\r\n\t\t\t\tpackets[i_1_].assemble(dataoutputstream);\r\n\t\t\t}\r\n\t\t} catch (IOException ioexception) {\r\n\t\t\tthrow new NullPointerException(\"Impossible IO Exception\");\r\n\t\t}\r\n\t\tint i_2_ = bytearrayoutputstream.size();\r\n\t\tdata = bytearrayoutputstream.toByteArray();\r\n\t\tif (i_2_ > i)\r\n\t\t\tthrow new NullPointerException(\"RTCP Packet overflow\");\r\n\t\tif (i_2_ < i) {\r\n\t\t\tif (data.length < i)\r\n\t\t\t\tSystem.arraycopy(data, 0, data = new byte[i], 0, i_2_);\r\n\t\t\tdata[i_0_] |= 0x20;\r\n\t\t\tdata[i - 1] = (byte) (i - i_2_);\r\n\t\t\tint i_3_ = (data[i_0_ + 3] & 0xff) + (i - i_2_ >> 2);\r\n\t\t\tif (i_3_ >= 256)\r\n\t\t\t\tdata[i_0_ + 2] += i - i_2_ >> 10;\r\n\t\t\tdata[i_0_ + 3] = (byte) i_3_;\r\n\t\t}\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream dataoutputstream) throws IOException {\r\n\t\tthrow new IllegalArgumentException(\"Recursive Compound Packet\");\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\tint i = 0;\r\n\t\tif (packets == null || packets.length < 1)\r\n\t\t\tthrow new IllegalArgumentException(\"Bad RTCP Compound Packet\");\r\n\t\tfor (int i_4_ = 0; i_4_ < packets.length; i_4_++)\r\n\t\t\ti += packets[i_4_].calcLength();\r\n\t\treturn i;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\nimport java.io.DataOutputStream;\nimport java.io.IOException;\n\n/**\n * Abstract RCTP packet\n *\n * @author jexa7410\n */\r\npublic abstract class RtcpPacket extends Packet {\r\n    /**\n     *   Version =2\n     */\n    public static final byte VERSION = 2;\n\n    /**\n    *   Padding =0\n    */\n    public static final byte PADDING = 0;\n\n    /**\n     * RTCP SR\n     */\n    public static final int RTCP_SR = 200;\n\n    /**\n     * RTCP RR\n     */\n    public static final int RTCP_RR = 201;\n\n    /**\n     * RTCP SDES\n     */\n    public static final int RTCP_SDES = 202;\n\n    /**\n     * RTCP BYE\n     */\n    public static final int RTCP_BYE = 203;\n\n    /**\n     * RTCP APP\n     */\n    public static final int RTCP_APP = 204;\n\n    /**\n     * RTCP APP\n     */\n    public static final int RTCP_COMPOUND = -1;\n\n\tpublic Packet base;\r\n\r\n\tpublic int type;\r\n\r\n\tpublic RtcpPacket() {\r\n\t}\r\n\r\n\tpublic RtcpPacket(RtcpPacket rtcppacket) {\r\n\t\tsuper((Packet)rtcppacket);\r\n\r\n\t\tbase = 
rtcppacket.base;\r\n\t}\r\n\r\n\tpublic RtcpPacket(Packet packet) {\r\n\t\tsuper(packet);\r\n\r\n\t\tbase = packet;\r\n\t}\r\n\r\n\tpublic abstract void assemble(DataOutputStream dataoutputstream) throws IOException;\r\n\r\n\tpublic abstract int calcLength();\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketReceiver.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpApplicationEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpByeEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpEventListener;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpReceiverReportEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpSdesEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.event.RtcpSenderReportEvent;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.DataInputStream;\nimport java.io.IOException;\nimport java.util.Vector;\n\n/**\n * RTCP packet receiver\n *\n * @author jexa7410\n */\r\npublic class RtcpPacketReceiver extends Thread {\r\n\t/**\r\n\t * Datagram connection\r\n\t */\r\n    public DatagramConnection datagramConnection = 
null;\r\n\r\n\t/**\r\n\t * Statistics\r\n\t */\r\n\tprivate RtcpStatisticsReceiver stats =  new RtcpStatisticsReceiver();\r\n\r\n\t/**\r\n\t * RTCP event listeners\r\n\t */\r\n\tprivate Vector<RtcpEventListener> listeners = new Vector<RtcpEventListener>();\r\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession rtcpSession = null;\n\r\n\t/**\r\n\t * The logger\r\n\t */\r\n\tprivate final Logger logger = Logger.getLogger(this.getClass().getName());\n\n    /**\n     * Constructor\n     *\n     * @param port Listenning port\n     * @param rtcpSession the RTCP session\n     * @throws IOException\n     */\r\n    public RtcpPacketReceiver(int port, RtcpSession rtcpSession) throws IOException {\r\n        super();\n\n        this.rtcpSession = rtcpSession;\n\r\n\t\t// Create the UDP server\r\n\t\tdatagramConnection = NetworkFactory.getFactory().createDatagramConnection();\r\n\t\tdatagramConnection.open(port);\n\t\t\n\t\tif (logger.isActivated()) {\t \n\t\t\tlogger.debug(\"RTCP receiver created at port \" + port);\t \n        }\t\t\r\n\t}\r\n\r\n\t/**\n     * Close the receiver\n     *\n     * @throws IOException\n     */\r\n\tpublic void close() throws IOException {\r\n\t\t// Interrup the current thread processing\r\n\t\ttry {\r\n\t\t\tinterrupt();\r\n\t\t} catch(Exception e) {}\r\n\r\n\t\t// Close the datagram connection\r\n\t\tif (datagramConnection != null) {\r\n\t\t\tdatagramConnection.close();\r\n\t\t\tdatagramConnection = null;\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Background processing\r\n\t */\r\n\tpublic void run() {\r\n\t\ttry {\r\n            while (datagramConnection != null) {\r\n\t\t\t\t// Wait a packet\r\n\t\t\t\tbyte[] data = datagramConnection.receive();\r\n\r\n\t\t        // Create a packet object\r\n\t\t\t\tPacket packet = new Packet();\r\n\t\t\t\tpacket.data = data;\r\n\t\t\t\tpacket.length = data.length;\r\n\t\t\t\tpacket.offset = 0;\r\n\t\t\t\tpacket.receivedAt = System.currentTimeMillis();\r\n\r\n\t\t        // Process the received 
packet\r\n\t\t\t\thandlePacket(packet);\r\n\t\t\t}\r\n\t\t} catch (Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"Datagram socket server failed\", e);\r\n\t\t\t}\r\n\t\t}\r\n\t}\n\n    /**\n     * Handle the received packet\n     *\n     * @param packet Packet\n     * @return RTCP packet\n     */\r\n\tpublic RtcpPacket handlePacket(Packet p) {\r\n\t\t// Update statistics\r\n\t\tstats.numRtcpPkts++;\r\n\t\tstats.numRtcpBytes += p.length;\r\n\r\n\t\t// Parse the RTCP packet\r\n\t\tRtcpPacket result;\r\n\t\ttry {\r\n\t\t\tresult = parseRtcpPacket(p);\r\n\t\t} catch (Exception e) {\r\n\t\t\tstats.numBadRtcpPkts++;\r\n\t\t\treturn null;\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Parse the RTCP packet\n     *\n     * @param packet RTCP packet not yet parsed\n     * @return RTCP packet\n     */\r\n\tpublic RtcpPacket parseRtcpPacket(Packet packet) {\r\n\t\tRtcpCompoundPacket compoundPacket = new RtcpCompoundPacket(packet);\r\n\t\tVector<RtcpPacket> subpackets = new Vector<RtcpPacket>();\r\n\t\tDataInputStream in = new DataInputStream(\r\n\t\t\t\tnew ByteArrayInputStream(compoundPacket.data,\r\n\t\t\t\t\t\tcompoundPacket.offset,\r\n\t\t\t\t\t\tcompoundPacket.length));\r\n\t\ttry {\n            rtcpSession.updateavgrtcpsize(compoundPacket.length);\r\n\t\t\tint length = 0;\r\n\t\t\tfor (int offset = 0; offset < compoundPacket.length; offset += length) {\r\n\t\t\t\t// Read first byte\r\n\t\t\t\tint firstbyte = in.readUnsignedByte();\r\n\t\t\t\tif ((firstbyte & 0xc0) != 128) {\r\n\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\tlogger.error(\"Bad RTCP packet version\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\treturn null;\r\n\t\t\t\t}\r\n\r\n\t\t\t\t// Read type of subpacket\r\n\t\t\t\tint type = in.readUnsignedByte();\r\n\r\n\t\t\t\t// Read length of subpacket\r\n\t\t\t\tlength = in.readUnsignedShort();\r\n\t\t\t\tlength = length + 1 << 2;\r\n\t\t\t\tint padlen = 0;\r\n\t\t\t\tif (offset + length > compoundPacket.length) {\r\n\t\t\t\t\tif 
(logger.isActivated()) {\r\n\t\t\t\t\t\tlogger.error(\"Bad RTCP packet length\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\treturn null;\r\n\t\t\t\t}\r\n\t\t\t\tif (offset + length == compoundPacket.length) {\r\n\t\t\t\t\tif ((firstbyte & 0x20) != 0) {\r\n\t\t\t\t\t\tpadlen = compoundPacket.data[compoundPacket.offset + compoundPacket.length - 1] & 0xff;\r\n\t\t\t\t\t\tif (padlen == 0) {\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\r\n\t\t\t\t} else\r\n\t\t\t\tif ((firstbyte & 0x20) != 0) {\r\n\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\tlogger.error(\"Bad RTCP packet format (P != 0)\");\r\n\t\t\t\t\t}\r\n\t\t\t\t\treturn null;\r\n\t\t\t\t}\r\n\t\t\t\tint inlength = length - padlen;\r\n\t\t\t\tfirstbyte &= 0x1f;\r\n\r\n\t\t\t\t// Parse subpacket\r\n\t\t\t\tRtcpPacket subpacket;\r\n\t\t\t\tswitch (type) {\r\n\t\t\t\t\t// RTCP SR event\r\n                    case RtcpPacket.RTCP_SR:\r\n\t\t\t\t\t\tstats.numSrPkts++;\r\n\t\t\t\t\t\tif (inlength != 28 + 24 * firstbyte) {\r\n\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP SR packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tRtcpSenderReportPacket srp = new RtcpSenderReportPacket(compoundPacket);\r\n\t\t\t\t\t\tsubpacket = srp;\r\n\t\t\t\t\t\tsrp.ssrc = in.readInt();\r\n\t\t\t\t\t\tsrp.ntptimestampmsw = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\tsrp.ntptimestamplsw = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\tsrp.rtptimestamp = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\tsrp.packetcount = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\tsrp.octetcount = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\tsrp.reports = new RtcpReport[firstbyte];\r\n\n                        RtpSource sourceSR = rtcpSession.getMySource();\n                   
     if (sourceSR != null)\n                            sourceSR.timeOfLastRTCPArrival = rtcpSession.currentTime();\n\r\n\t\t\t\t\t\tfor (int i = 0; i < srp.reports.length; i++) {\r\n\t\t\t\t\t\t\tRtcpReport report = new RtcpReport();\r\n\t\t\t\t\t\t\tsrp.reports[i] = report;\r\n\t\t\t\t\t\t\treport.ssrc = in.readInt();\r\n\t\t\t\t\t\t\tlong val = in.readInt();\r\n\t\t\t\t\t\t\tval &= 0xffffffffL;\r\n\t\t\t\t\t\t\treport.fractionlost = (int) (val >> 24);\r\n\t\t\t\t\t\t\treport.packetslost = (int) (val & 0xffffffL);\r\n\t\t\t\t\t\t\treport.lastseq = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t\treport.jitter = in.readInt();\r\n\t\t\t\t\t\t\treport.lsr = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t\treport.dlsr = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t// Notify event listeners\r\n\t\t\t\t\t\tnotifyRtcpListeners(new RtcpSenderReportEvent(srp));\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t// RTCP RR event\r\n                    case RtcpPacket.RTCP_RR:\r\n\t\t\t\t\t\tif (inlength != 8 + 24 * firstbyte) {\r\n\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP RR packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tRtcpReceiverReportPacket rrp = new RtcpReceiverReportPacket(compoundPacket);\r\n\t\t\t\t\t\tsubpacket = rrp;\r\n\t\t\t\t\t\trrp.ssrc = in.readInt();\r\n\t\t\t\t\t\trrp.reports = new RtcpReport[firstbyte];\n\n                        RtpSource sourceRR = rtcpSession.getMySource();\n                        if (sourceRR != null)\n                            sourceRR.timeOfLastRTCPArrival = rtcpSession.currentTime();\n\r\n\t\t\t\t\t\tfor (int i = 0; i < rrp.reports.length; i++) {\r\n\t\t\t\t\t\t\tRtcpReport report = new RtcpReport();\r\n\t\t\t\t\t\t\trrp.reports[i] = report;\r\n\t\t\t\t\t\t\treport.ssrc = in.readInt();\r\n\t\t\t\t\t\t\tlong val = in.readInt();\r\n\t\t\t\t\t\t\tval &= 
0xffffffffL;\r\n\t\t\t\t\t\t\treport.fractionlost = (int) (val >> 24);\r\n\t\t\t\t\t\t\treport.packetslost = (int) (val & 0xffffffL);\r\n\t\t\t\t\t\t\treport.lastseq = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t\treport.jitter = in.readInt();\r\n\t\t\t\t\t\t\treport.lsr = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t\treport.dlsr = (long) in.readInt() & 0xffffffffL;\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t// Notify event listeners\r\n\t\t\t\t\t\tnotifyRtcpListeners(new RtcpReceiverReportEvent(rrp));\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t// RTCP SDES event\r\n                    case RtcpPacket.RTCP_SDES:\r\n\t\t\t\t\t\tRtcpSdesPacket sdesp = new RtcpSdesPacket(compoundPacket);\r\n\t\t\t\t\t\tsubpacket = sdesp;\r\n\t\t\t\t\t\tsdesp.sdes = new RtcpSdesBlock[firstbyte];\r\n\t\t\t\t\t\tint sdesoff = 4;\r\n\t\t\t\t\t\tfor (int i = 0; i < sdesp.sdes.length; i++) {\r\n\t\t\t\t\t\t\tRtcpSdesBlock chunk = new RtcpSdesBlock();\r\n\t\t\t\t\t\t\tsdesp.sdes[i] = chunk;\r\n\t\t\t\t\t\t\tchunk.ssrc = in.readInt();\r\n\t\t\t\t\t\t\tsdesoff += 5;\r\n\t\t\t\t\t\t\tVector<RtcpSdesItem> items = new Vector<RtcpSdesItem>();\r\n\t\t\t\t\t\t\tboolean gotcname = false;\r\n\t\t\t\t\t\t\tint j;\r\n\t\t\t\t\t\t\twhile ((j = in.readUnsignedByte()) != 0) {\r\n\t\t\t\t\t\t\t\tif (j < 1 || j > 8) {\r\n\t\t\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP SDES packet format\");\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\tif (j == 1) {\r\n\t\t\t\t\t\t\t\t\tgotcname = true;\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\tRtcpSdesItem item = new RtcpSdesItem();\r\n\t\t\t\t\t\t\t\titems.addElement(item);\r\n\t\t\t\t\t\t\t\titem.type = j;\r\n\t\t\t\t\t\t\t\tint sdeslen = in.readUnsignedByte();\r\n\t\t\t\t\t\t\t\titem.data = new byte[sdeslen];\r\n\t\t\t\t\t\t\t\tin.readFully(item.data);\r\n\t\t\t\t\t\t\t\tsdesoff += 2 + 
sdeslen;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tif (!gotcname) {\r\n\t\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP SDES packet format\");\r\n\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tchunk.items = new RtcpSdesItem[items.size()];\r\n\t\t\t\t\t\t\titems.copyInto(chunk.items);\r\n\t\t\t\t\t\t\tif ((sdesoff & 3) != 0) {\r\n                                if (in.skip(4 - (sdesoff & 3)) != 4 - (sdesoff & 3)) {\n                                    if (logger.isActivated()) {\n                                        logger.error(\"Bad RTCP SDES packet format\");\n                                    }\n                                    return null;\n                                }\r\n\t\t\t\t\t\t\t\tsdesoff = sdesoff + 3 & -4;\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tif (inlength != sdesoff) {\r\n\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP SDES packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t// Notify event listeners\r\n                        notifyRtcpListeners(new RtcpSdesEvent(sdesp));\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t// RTCP BYE event\r\n                    case RtcpPacket.RTCP_BYE:\r\n\t\t\t\t\t\tRtcpByePacket byep = new RtcpByePacket(compoundPacket);\r\n\t\t\t\t\t\tsubpacket = byep;\r\n\t\t\t\t\t\tbyep.ssrc = new int[firstbyte];\r\n\t\t\t\t\t\tfor (int i = 0; i < byep.ssrc.length; i++) {\r\n\t\t\t\t\t\t\tbyep.ssrc[i] = in.readInt();\n\t\t\t\t\t\t}\n\r\n\t\t\t\t\t\tint reasonlen;\r\n\t\t\t\t\t\tif (inlength > 4 + 4 * firstbyte) {\r\n\t\t\t\t\t\t\treasonlen = in.readUnsignedByte();\r\n\t\t\t\t\t\t\tbyep.reason = new byte[reasonlen];\r\n\t\t\t\t\t\t\treasonlen++;\r\n\t\t\t\t\t\t} else {\r\n\t\t\t\t\t\t\treasonlen = 0;\r\n\t\t\t\t\t\t\tbyep.reason = new 
byte[0];\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treasonlen = reasonlen + 3 & -4;\r\n\t\t\t\t\t\tif (inlength != 4 + 4 * firstbyte + reasonlen) {\r\n\t\t\t\t\t\t\tstats.numMalformedRtcpPkts++;\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP BYE packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tin.readFully(byep.reason);\n                        int skipBye = reasonlen - byep.reason.length;\n                        if (in.skip(skipBye) != skipBye) {\n                            if (logger.isActivated()) {\n                                logger.error(\"Bad RTCP BYE packet format\");\n                            }\n                            return null;\n                        }\r\n\r\n\t\t\t\t\t\t// Notify event listeners\r\n\t\t\t\t\t\tnotifyRtcpListeners(new RtcpByeEvent(byep));\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t// RTCP APP event\r\n                    case RtcpPacket.RTCP_APP:\r\n\t\t\t\t\t\tif (inlength < 12) {\r\n\t\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\t\tlogger.error(\"Bad RTCP APP packet format\");\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\treturn null;\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tRtcpAppPacket appp = new RtcpAppPacket(compoundPacket);\r\n\t\t\t\t\t\tsubpacket = appp;\r\n\t\t\t\t\t\tappp.ssrc = in.readInt();\r\n\t\t\t\t\t\tappp.name = in.readInt();\r\n\t\t\t\t\t\tappp.subtype = firstbyte;\r\n\t\t\t\t\t\tappp.data = new byte[inlength - 12];\r\n\t\t\t\t\t\tin.readFully(appp.data);\r\n                        int skipApp = inlength - 12 - appp.data.length;\n                        if (in.skip(skipApp) != skipApp) {\n                            if (logger.isActivated()) {\n                                logger.error(\"Bad RTCP APP packet format\");\n                            }\n                            return null;\n                        }\r\n\r\n\t\t\t\t\t\t// Notify event listeners\r\n\t\t\t\t\t\tnotifyRtcpListeners(new 
RtcpApplicationEvent(appp));\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t// RTCP unknown event\r\n\t\t\t\t\tdefault:\r\n\t\t\t\t\t\tstats.numUnknownTypes++;\r\n\t\t\t\t\t\tif (logger.isActivated()) {\r\n\t\t\t\t\t\t\tlogger.error(\"Bad RTCP packet format\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\treturn null;\r\n\t\t\t\t}\r\n\t\t\t\tsubpacket.offset = offset;\r\n\t\t\t\tsubpacket.length = length;\r\n\t\t\t\tsubpackets.addElement(subpacket);\r\n                if (in.skipBytes(padlen) != padlen) {\n                    if (logger.isActivated()) {\n                        logger.error(\"Bad RTCP packet format\");\n                    }\n                    return null;\n                }\n\t\t\t}\r\n\r\n\t\t} catch (Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"RTCP packet parsing error\", e);\r\n\t\t\t}\r\n\t\t\treturn null;\r\n\t\t}\r\n\t\tcompoundPacket.packets = new RtcpPacket[subpackets.size()];\r\n\t\tsubpackets.copyInto(compoundPacket.packets);\r\n\t\treturn compoundPacket;\r\n\t}\n\n    /**\n     * Add a RTCP event listener\n     *\n     * @param listener Listener\n     */\r\n\tpublic void addRtcpListener(RtcpEventListener listener) {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Add a RTCP event listener\");\r\n\t\t}\r\n\t\tlisteners.addElement(listener);\r\n\t}\r\n\r\n\t/**\n     * Remove a RTCP event listener\n     *\n     * @param listener Listener\n     */\r\n\tpublic void removeRtcpListener(RtcpEventListener listener) {\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Remove a RTCP event listener\");\r\n\t\t}\r\n\t\tlisteners.removeElement(listener);\r\n\t}\r\n\r\n\t/**\n     * Notify RTCP event listeners\n     *\n     * @param event RTCP event\n     */\r\n\tpublic void notifyRtcpListeners(RtcpEvent event) {\r\n\t\tfor(int i=0; i < listeners.size(); i++) {\r\n\t\t\tRtcpEventListener listener = (RtcpEventListener)listeners.elementAt(i);\r\n\t\t\tlistener.receiveRtcpEvent(event);\r\n\t\t}\r\n\t}\n\n    /**\n     * 
Returns the statistics of RTCP reception\n     *\n     * @return Statistics\n     */\r\n\tpublic RtcpStatisticsReceiver getRtcpReceptionStats() {\r\n\t\treturn stats;\r\n    }\n\n    /**\n     * Returns the DatagramConnection of RTCP\n     *\n     * @return DatagramConnection\n     */\n    public DatagramConnection getConnection() {\n        return datagramConnection;\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketTransmitter.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport java.io.IOException;\nimport java.util.Random;\nimport java.util.Vector;\n\n/**\n * RTCP packet transmitter\n *\n * @author jexa7410\n */\r\npublic class RtcpPacketTransmitter extends Thread {\r\n    /**\r\n\t * Remote address\r\n\t */\r\n\tprivate String remoteAddress;\r\n\r\n    /**\r\n\t * Remote port\r\n\t */\r\n\tprivate int remotePort;\r\n\r\n\t/**\r\n\t * Statistics\r\n\t */\r\n\tprivate RtcpStatisticsTransmitter stats = new RtcpStatisticsTransmitter();\r\n\r\n\t/**\r\n\t * Datagram connection\r\n\t */\r\n\tpublic DatagramConnection datagramConnection = null;\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession rtcpSession = null;\n\n    /**\n     * Flag used to determine when to terminate after sending a BYE\n     */\n    private boolean waitingForByeBackoff = false;\n\n    /**\n     * Flag used to properly close\n     */\n    private boolean closed = false;\n\n    /**\n     * Random 
value\n     */\n    private Random rand = new Random();\n\n    /**\n     * The logger\n     */\r\n\tprivate final Logger logger = Logger.getLogger(this.getClass().getName());\n\n    /**\n     * Constructor\n     *\n     * @param address Remote address\n     * @param port Remote port\n     * @param rtcpSession the RTCP session\n     * @throws IOException\n     */\r\n    public RtcpPacketTransmitter(String address, int port, RtcpSession rtcpSession)\n            throws IOException {\n        super();\n\n        this.remoteAddress = address;\n        this.remotePort = port;\n        this.rtcpSession = rtcpSession;\n\n        // Open the connection\n        datagramConnection = NetworkFactory.getFactory().createDatagramConnection();\n        datagramConnection.open();\n\n        if (logger.isActivated()) {\n            logger.debug(\"RTCP transmitter connected to \" + remoteAddress + \":\" + remotePort);\n        }\n    }\n\n    /**\n     * Constructor - used for SYMETRIC_RTP\n     *\n     * @param address Remote address\n     * @param port Remote port\n     * @param rtcpSession the RTCP session\n     * @param DatagramConnection datagram connection of the RtpPacketReceiver\n     * @throws IOException\n     */\n    public RtcpPacketTransmitter(String address, int port, RtcpSession rtcpSession,\n            DatagramConnection connection) throws IOException {\n        super();\n\n        this.remoteAddress = address;\n        this.remotePort = port;\n        this.rtcpSession = rtcpSession;\n\n        // Open the connection\n        if (connection != null) {\n            this.datagramConnection = connection;\n        } else {\n            this.datagramConnection = NetworkFactory.getFactory().createDatagramConnection();\n            this.datagramConnection.open();\n        }\n\n        if (logger.isActivated()) {\n            logger.debug(\"RTCP transmitter connected to \" + remoteAddress + \":\" + remotePort);\n        }\n    }\n\n    /**\n     * Close the transmitter\n    
 *\n     * @throws IOException\n     */\r\n\tpublic void close() throws IOException {\r\n        rtcpSession.isByeRequested = true;\n        closed = true;\n        // Close the datagram connection\r\n\t\tif (datagramConnection != null) {\r\n\t\t\tdatagramConnection.close();\r\n\t\t}\r\n\t\tif (logger.isActivated()) {\r\n            logger.debug(\"RTCP transmitter closed\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Background processing\r\n\t */\r\n\tpublic void run() {\r\n\t\ttry {\r\n            // Send a SDES packet\n            // sendSdesPacket();\n\n            boolean terminate = false;\n            while (!terminate) {\n                try {\n                    // Wait the RTCP report interval.\n                    Thread.sleep((long)rtcpSession.getReportInterval());\n\n                    // Right time to send a RTCP packet or reschedule ?\n                    if ((rtcpSession.timeOfLastRTCPSent + rtcpSession.T) <= rtcpSession\n                            .currentTime()) {\n                        // We know that it is time to send a RTCP packet, is it\n                        // a BYE packet\n                        if ((rtcpSession.isByeRequested && waitingForByeBackoff)) {\n                            // If it is bye then did we ever sent anything\n                            if (rtcpSession.timeOfLastRTCPSent > 0\n                                    && rtcpSession.timeOfLastRTPSent > 0) {\n                                rtcpSession.getMySource().activeSender = false;\n                                rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();\n                            } else {\n                                // We never sent anything and we have to quit :(\n                                // do not send BYE\n                                terminate = true;\n                            }\n                        } else {\n                            if (!closed) {\n                                transmit(assembleRtcpPacket());\n      
                          if (rtcpSession.isByeRequested && !waitingForByeBackoff) {\n                                    // We have sent a BYE packet, so terminate\n                                    terminate = true;\n                                } else {\n                                    rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();\n                                }\n                            } else {\n                                terminate = true;\n                            }\n\n                        }\n                    }\n                    waitingForByeBackoff = false;\n\n                } catch (InterruptedException e) {\n                    waitingForByeBackoff = true;\n                    rtcpSession.isByeRequested = true;\n                }\n            }\n\t\t} catch (Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n                logger.error(\"Can't send the RTCP packet\", e);\r\n\t\t\t}\r\n\t\t}\r\n\t}\n\n    /**\n     * assemble RTCP packet\n     */\n    private byte[] assembleRtcpPacket() {\n        byte data[] = new byte[0];\n\n        // Sender or receiver packet\n        RtpSource s = rtcpSession.getMySource();\n        if ((s.activeSender) && (rtcpSession.timeOfLastRTCPSent < rtcpSession.timeOfLastRTPSent)) {\n            data = RtcpPacketUtils.append(data, assembleSenderReportPacket());\n        } else {\n            data = RtcpPacketUtils.append(data, assembleReceiverReportPacket());\n        }\n\n        // SDES packets\n        Vector<RtcpSdesPacket> repvec = makereports();\n        for (int i = 0; i < repvec.size(); i++) {\n            if (repvec.elementAt(i).data != null)\n                data = RtcpPacketUtils.append(data, repvec.elementAt(i).data);\n        }\n\n        // BYE packet\n        RtcpByePacket byepacket = null;\n        if (rtcpSession.isByeRequested) {\n            int ssrc[] = {rtcpSession.SSRC};\n            byepacket = new RtcpByePacket(ssrc, null);\n            data = 
RtcpPacketUtils.append(data, byepacket.data);\n        }\n\n        return data;\n    }\n\n    /**\n     * assemble RTCP SR packet\n     * @return packet data\n     */\n    private byte[] assembleSenderReportPacket() {\n        final int FIXED_HEADER_SIZE = 4;\n        byte V_P_RC = (byte)((RtcpPacket.VERSION << 6) | (RtcpPacket.PADDING << 5) | (0x00));\n        byte ss[] = RtcpPacketUtils.longToBytes(rtcpSession.SSRC, 4);\n        byte PT[] = RtcpPacketUtils.longToBytes((long)RtcpPacket.RTCP_SR, 1);\n        byte NTP_TimeStamp[] = RtcpPacketUtils.longToBytes(rtcpSession.currentTime(), 8);\n        short randomOffset = (short)Math.abs(rand.nextInt() & 0x000000FF);\n        byte RTP_TimeStamp[] = RtcpPacketUtils.longToBytes((long)rtcpSession.tc\n                + randomOffset, 4);\n        byte SenderPacketCount[] = RtcpPacketUtils.longToBytes(rtcpSession.packetCount, 4);\n        byte SenderOctetCount[] = RtcpPacketUtils.longToBytes(rtcpSession.octetCount, 4);\n\n        // report block\n        byte receptionReportBlocks[] = new byte[0];\n        receptionReportBlocks = RtcpPacketUtils.append(receptionReportBlocks,\n                assembleRTCPReceptionReport());\n        byte receptionReports = (byte)(receptionReportBlocks.length / 24);\n        V_P_RC = (byte)(V_P_RC | (byte)(receptionReports & 0x1F));\n\n        // Length is 32 bit words contained in the packet -1\n        byte length[] = RtcpPacketUtils.longToBytes((FIXED_HEADER_SIZE + ss.length\n                + NTP_TimeStamp.length + RTP_TimeStamp.length + SenderPacketCount.length\n                + SenderOctetCount.length + receptionReportBlocks.length) / 4 - 1, 2);\n\n        // Build RTCP SR Packet\n        byte rtcpSRPacket[] = new byte[1];\n        rtcpSRPacket[0] = V_P_RC;\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, PT);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, length);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, ss);\n        rtcpSRPacket = 
RtcpPacketUtils.append(rtcpSRPacket, NTP_TimeStamp);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, RTP_TimeStamp);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, SenderPacketCount);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, SenderOctetCount);\n        rtcpSRPacket = RtcpPacketUtils.append(rtcpSRPacket, receptionReportBlocks);\n\n        return rtcpSRPacket;\n    }\n\n    /**\n     * assemble RTCP RR packet\n     * @return packet data\n     */\n    private byte[] assembleReceiverReportPacket() {\n        final int FIXED_HEADER_SIZE = 4;\n        byte V_P_RC = (byte)((RtcpPacket.VERSION << 6) | (RtcpPacket.PADDING << 5) | (0x00));\n        byte ss[] = RtcpPacketUtils.longToBytes(rtcpSession.SSRC, 4);\n        byte PT[] = RtcpPacketUtils.longToBytes((long)RtcpPacket.RTCP_RR, 1);\n\n        // report block\n        byte receptionReportBlocks[] = new byte[0];\n        receptionReportBlocks = RtcpPacketUtils.append(receptionReportBlocks,\n                assembleRTCPReceptionReport());\n        byte receptionReports = (byte)(receptionReportBlocks.length / 24);\n        V_P_RC = (byte)(V_P_RC | (byte)(receptionReports & 0x1F));\n\n        byte length[] = RtcpPacketUtils.longToBytes(\n                (FIXED_HEADER_SIZE + ss.length + receptionReportBlocks.length) / 4 - 1, 2);\n\n        // Build RTCP RR Packet\n        byte RRPacket[] = new byte[1];\n        RRPacket[0] = V_P_RC;\n        RRPacket = RtcpPacketUtils.append(RRPacket, PT);\n        RRPacket = RtcpPacketUtils.append(RRPacket, length);\n        RRPacket = RtcpPacketUtils.append(RRPacket, ss);\n        RRPacket = RtcpPacketUtils.append(RRPacket, receptionReportBlocks);\n        return RRPacket;\n    }\n\n    /**\n     * assemble RTCP Reception report block\n     * @return report data\n     */\n    private byte[] assembleRTCPReceptionReport() {\n        byte reportBlock[] = new byte[0];\n        RtpSource source = rtcpSession.getMySource();\n\n        
source.updateStatistics();\n        byte SSRC[] = RtcpPacketUtils.longToBytes((long)source.SSRC, 4);\n        byte fraction_lost[] = RtcpPacketUtils.longToBytes((long)source.fraction, 1);\n        byte pkts_lost[] = RtcpPacketUtils.longToBytes((long)source.lost, 3);\n        byte last_seq[] = RtcpPacketUtils.longToBytes((long)source.last_seq, 4);\n        byte jitter[] = RtcpPacketUtils.longToBytes((long)source.jitter, 4);\n        byte lst[] = RtcpPacketUtils.longToBytes((long)source.lst, 4);\n        byte dlsr[] = RtcpPacketUtils.longToBytes((long)source.dlsr, 4);\n\n        reportBlock = RtcpPacketUtils.append(reportBlock, SSRC);\n        reportBlock = RtcpPacketUtils.append(reportBlock, fraction_lost);\n        reportBlock = RtcpPacketUtils.append(reportBlock, pkts_lost);\n        reportBlock = RtcpPacketUtils.append(reportBlock, last_seq);\n        reportBlock = RtcpPacketUtils.append(reportBlock, jitter);\n        reportBlock = RtcpPacketUtils.append(reportBlock, lst);\n        reportBlock = RtcpPacketUtils.append(reportBlock, dlsr);\n\n        return reportBlock;\n    }\n\n\t/**\r\n\t * Send a BYE packet\r\n\t */\r\n\tpublic void sendByePacket() {\r\n\t\t// Create a report\r\n\t    Vector<RtcpSdesPacket> repvec = makereports();\r\n\t    RtcpPacket[] packets = new RtcpPacket[repvec.size() + 1];\r\n\t    repvec.copyInto(packets);\r\n\r\n\t    // Create a RTCP bye packet\r\n\t    int ssrc[] = {rtcpSession.SSRC};\r\n\t    RtcpByePacket rtcpbyepacket = new RtcpByePacket(ssrc, null);\r\n\t    packets[packets.length - 1] = rtcpbyepacket;\r\n\r\n\t\t// Create a RTCP compound packet\r\n\t    RtcpCompoundPacket cp = new RtcpCompoundPacket(packets);\r\n\n        rtcpSession.getMySource().activeSender = false;\n\r\n\t    // Send the RTCP packet\r\n\t\ttransmit(cp);\n\t}\r\n\r\n\t/**\n     * Generate a RTCP report\n     *\n     * @return Vector\n     */\n\tpublic Vector<RtcpSdesPacket> makereports() {\r\n\t\tVector<RtcpSdesPacket> packets = new 
Vector<RtcpSdesPacket>();\r\n\r\n\t\tRtcpSdesPacket rtcpsdespacket = new RtcpSdesPacket(new RtcpSdesBlock[1]);\r\n\t\trtcpsdespacket.sdes[0] = new RtcpSdesBlock();\r\n\t\trtcpsdespacket.sdes[0].ssrc = rtcpSession.SSRC;\r\n\r\n\t\tVector<RtcpSdesItem> vector = new Vector<RtcpSdesItem>();\r\n\t\tvector.addElement(new RtcpSdesItem(1, RtpSource.CNAME));\r\n\t\trtcpsdespacket.sdes[0].items = new RtcpSdesItem[vector.size()];\r\n\t\tvector.copyInto(rtcpsdespacket.sdes[0].items);\r\n\r\n\t\tpackets.addElement(rtcpsdespacket);\r\n\t\treturn packets;\r\n\t}\n\n    /**\n     * Transmit a RTCP compound packet to the remote destination\n     *\n     * @param packet Compound packet to be sent\n     */\r\n\tprivate void transmit(RtcpCompoundPacket packet) {\r\n\t\t// Prepare data to be sent\r\n\t\tbyte[] data = packet.data;\r\n\t\tif (packet.offset > 0) {\r\n\t\t\tSystem.arraycopy(data, packet.offset,\r\n\t\t\t\t\tdata = new byte[packet.length], 0, packet.length);\r\n\t\t}\r\n\r\n\t\t// Update statistics\r\n\t\tstats.numBytes += packet.length;\r\n        stats.numPackets++;\n        rtcpSession.updateavgrtcpsize(packet.length);\n        rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();\r\n\t\t// Send data over UDP\r\n\t\ttry {\r\n\t\t\tdatagramConnection.send(remoteAddress, remotePort, data);\r\n\t\t} catch(IOException e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"Can't send the RTCP packet\", e);\r\n\t\t\t}\r\n\t\t}\n    }\n\n    /**\n     * Transmit a RTCP compound packet to the remote destination\n     *\n     * @param packet Compound packet to be sent\n     */\n    private void transmit(byte packet[]) {\n        // Update statistics\n        stats.numBytes += packet.length;\n        stats.numPackets++;\n        rtcpSession.updateavgrtcpsize(packet.length);\n        rtcpSession.timeOfLastRTCPSent = rtcpSession.currentTime();\n        // Send data over UDP\n        try {\n            datagramConnection.send(remoteAddress, remotePort, packet);\n   
     } catch (IOException e) {\n            if (logger.isActivated()) {\n                logger.error(\"Can't send the RTCP packet\", e);\n            }\n        }\n    }\n\n    /**\n     * Returns the statistics of RTCP transmission\n     *\n     * @return Statistics\n     */\r\n\tpublic RtcpStatisticsTransmitter getStatistics() {\r\n\t\treturn stats;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpPacketUtils.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP utils.\r\n *\r\n * @author hlxn7157\r\n */\r\npublic class RtcpPacketUtils {\r\n\r\n    /**\r\n     * Convert 64 bit long to n bytes.\r\n     *\r\n     * @param data data\r\n     * @param n desired number of bytes to convert the long to.\r\n     * @return buffer\r\n     */\r\n    public static byte[] longToBytes(long data, int n) {\r\n        byte buf[] = new byte[n];\r\n         for (int i = n - 1; i >= 0; i--) {\r\n            buf[i] = (byte)data;\r\n            data = data >> 8;\r\n        }\r\n        return buf;\r\n    }\r\n\r\n    /**\r\n     * Append two byte arrays.\r\n     *\r\n     * @param pck1 first packet.\r\n     * @param pck2 second packet.\r\n     * @return concatenated packet.\r\n     */\r\n    public static byte[] append(byte[] pck1, byte[] pck2) {\r\n        byte packet[] = new byte[pck1.length + pck2.length];\r\n        for (int i = 0; i < pck1.length; i++)\r\n            packet[i] = pck1[i];\r\n        for (int i = 0; i < pck2.length; i++)\r\n            packet[i + pck1.length] = pck2[i];\r\n        return 
packet;\r\n    }\r\n};\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpReceiverReportPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\n/**\r\n * RCTP RR packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpReceiverReportPacket extends RtcpPacket {\r\n\tpublic int ssrc;\r\n\tpublic RtcpReport[] reports;\r\n\r\n\tpublic RtcpReceiverReportPacket(int i, RtcpReport[] rtcpreportblocks) {\r\n\t\tssrc = i;\r\n\t\treports = rtcpreportblocks;\r\n\t\tif (rtcpreportblocks.length > 31)\r\n\t\t\tthrow new IllegalArgumentException(\"Too many reports\");\r\n\t}\r\n\r\n\tpublic RtcpReceiverReportPacket(RtcpPacket rtcppacket) {\r\n\t\tsuper(rtcppacket);\r\n\t\ttype = 201;\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream dataoutputstream) throws IOException {\r\n\t\tdataoutputstream.writeByte(128 + reports.length);\r\n\t\tdataoutputstream.writeByte(201);\r\n\t\tdataoutputstream.writeShort(1 + reports.length * 6);\r\n\t\tdataoutputstream.writeInt(ssrc);\r\n\t\tfor (int i = 0; i < reports.length; i++) {\r\n\t\t\tdataoutputstream.writeInt(reports[i].ssrc);\r\n\t\t\tdataoutputstream.writeInt((reports[i].packetslost & 0xffffff)\r\n\t\t\t\t\t+ 
(reports[i].fractionlost << 24));\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].lastseq);\r\n\t\t\tdataoutputstream.writeInt(reports[i].jitter);\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].lsr);\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].dlsr);\r\n\t\t}\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\treturn 8 + reports.length * 24;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpReport.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP report\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpReport {\r\n\tpublic int ssrc;\r\n\tpublic int fractionlost;\r\n\tpublic int packetslost;\r\n\tpublic long lastseq;\r\n\tpublic int jitter;\r\n\tpublic long lsr;\r\n\tpublic long dlsr;\r\n\tpublic long receiptTime;\r\n\r\n\tpublic long getDLSR() {\r\n\t\treturn dlsr;\r\n\t}\r\n\r\n\tpublic int getFractionLost() {\r\n\t\treturn fractionlost;\r\n\t}\r\n\r\n\tpublic long getJitter() {\r\n\t\treturn (long) jitter;\r\n\t}\r\n\r\n\tpublic long getLSR() {\r\n\t\treturn lsr;\r\n\t}\r\n\r\n\tpublic long getNumLost() {\r\n\t\treturn (long) packetslost;\r\n\t}\r\n\r\n\tpublic long getSSRC() {\r\n\t\treturn (long) ssrc;\r\n\t}\r\n\r\n\tpublic long getXtndSeqNum() {\r\n\t\treturn lastseq;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesBlock.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP SDES block\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSdesBlock {\r\n\tpublic int ssrc;\r\n\t\r\n\tpublic RtcpSdesItem[] items;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesItem.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP SDES item\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSdesItem {\r\n\tpublic int type;\r\n\tpublic byte[] data;\r\n\r\n\tpublic RtcpSdesItem() {\r\n\t}\r\n\r\n\tpublic RtcpSdesItem(int i, String string) {\r\n\t\ttype = i;\r\n\t\tdata = string.getBytes();\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSdesPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\n/**\r\n * RCTP SDES packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSdesPacket extends RtcpPacket {\r\n\r\n\tpublic RtcpSdesBlock sdes[];\r\n\r\n\tpublic RtcpSdesPacket(RtcpPacket parent) {\r\n\t\tsuper(parent);\r\n\t\tsuper.type = 202;\r\n\t}\r\n\r\n\tpublic RtcpSdesPacket(RtcpSdesBlock sdes[]) {\r\n\t\tthis.sdes = sdes;\r\n\t\tif (sdes.length > 31) {\r\n\t\t\tthrow new IllegalArgumentException(\"Too many SDESs\");\r\n\t\t} else {\r\n\t\t\treturn;\r\n\t\t}\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\tint len = 4;\r\n\t\tfor (int i = 0; i < sdes.length; i++) {\r\n\t\t\tint sublen = 5;\r\n\t\t\tfor (int j = 0; j < sdes[i].items.length; j++) {\r\n\t\t\t\tsublen += 2 + sdes[i].items[j].data.length;\r\n\t\t\t}\r\n\r\n\t\t\tsublen = sublen + 3 & -4;\r\n\t\t\tlen += sublen;\r\n\t\t}\r\n\r\n\t\treturn len;\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream out) throws IOException {\r\n\t\tout.writeByte(128 + sdes.length);\r\n\t\tout.writeByte(202);\r\n\t\tout.writeShort(calcLength() - 4 >> 
2);\r\n\t\tfor (int i = 0; i < sdes.length; i++) {\r\n\t\t\tout.writeInt(sdes[i].ssrc);\r\n\t\t\tint sublen = 0;\r\n\t\t\tfor (int j = 0; j < sdes[i].items.length; j++) {\r\n\t\t\t\tout.writeByte(sdes[i].items[j].type);\r\n\t\t\t\tout.writeByte(sdes[i].items[j].data.length);\r\n\t\t\t\tout.write(sdes[i].items[j].data);\r\n\t\t\t\tsublen += 2 + sdes[i].items[j].data.length;\r\n\t\t\t}\r\n\r\n\t\t\tfor (int j = (sublen + 4 & -4) - sublen; j > 0; j--) {\r\n\t\t\t\tout.writeByte(0);\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSenderReportPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\n/**\r\n * RCTP SR packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSenderReportPacket extends RtcpPacket {\r\n\tpublic int ssrc;\r\n\tpublic long ntptimestampmsw;\r\n\tpublic long ntptimestamplsw;\r\n\tpublic long rtptimestamp;\r\n\tpublic long packetcount;\r\n\tpublic long octetcount;\r\n\tpublic RtcpReport[] reports;\r\n\r\n\tpublic RtcpSenderReportPacket(int i, RtcpReport[] rtcpreportblocks) {\r\n\t\tssrc = i;\r\n\t\treports = rtcpreportblocks;\r\n\t\tif (rtcpreportblocks.length > 31)\r\n\t\t\tthrow new IllegalArgumentException(\"Too many reports\");\r\n\t}\r\n\r\n\tpublic RtcpSenderReportPacket(RtcpPacket rtcppacket) {\r\n\t\tsuper(rtcppacket);\r\n\t\ttype = 200;\r\n\t}\r\n\r\n\tpublic void assemble(DataOutputStream dataoutputstream) throws IOException {\r\n\t\tdataoutputstream.writeByte(128 + reports.length);\r\n\t\tdataoutputstream.writeByte(200);\r\n\t\tdataoutputstream.writeShort(6 + reports.length * 6);\r\n\t\tdataoutputstream.writeInt(ssrc);\r\n\t\tdataoutputstream.writeInt((int) 
ntptimestampmsw);\r\n\t\tdataoutputstream.writeInt((int) ntptimestamplsw);\r\n\t\tdataoutputstream.writeInt((int) rtptimestamp);\r\n\t\tdataoutputstream.writeInt((int) packetcount);\r\n\t\tdataoutputstream.writeInt((int) octetcount);\r\n\t\tfor (int i = 0; i < reports.length; i++) {\r\n\t\t\tdataoutputstream.writeInt(reports[i].ssrc);\r\n\t\t\tdataoutputstream.writeInt((reports[i].packetslost & 0xffffff)\r\n\t\t\t\t\t+ (reports[i].fractionlost << 24));\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].lastseq);\r\n\t\t\tdataoutputstream.writeInt(reports[i].jitter);\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].lsr);\r\n\t\t\tdataoutputstream.writeInt((int) reports[i].dlsr);\r\n\t\t}\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\treturn 28 + reports.length * 24;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpSession.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.util.Date;\r\nimport java.util.Random;\r\n\r\n/**\r\n * RTCP Session Information\r\n *\r\n * @author hlxn7157\r\n */\r\npublic class RtcpSession {\r\n\r\n    /**\r\n     * minimum time between RTCP message (ms)\r\n     */\r\n    private static final int RTCP_MIN_TIME = 5000;\r\n\r\n    /**\r\n     * fraction of RTCP sender messages\r\n     */\r\n    private static final double RTCP_SENDER_BW_FRACTION = 0.25;\r\n\r\n    /**\r\n     * fraction of RTCP receiver messages\r\n     */\r\n    private static final double RTCP_RCVR_BW_FRACTION = 0.75;\r\n\r\n    /**\r\n     * bandwidth\r\n     */\r\n    private double bandwidth;\r\n\r\n    /**\r\n     * rtcp bandwidth\r\n     */\r\n    private double rtcp_bandwidth;\r\n\r\n    /**\r\n     * minimum time between RTCP message (ms)\r\n     */\r\n    private int rtcp_min_time;\r\n\r\n    /**\r\n     * RTCP average packet size\r\n     */\r\n    private int avgrtcpsize;\r\n\r\n    /**\r\n     * no of members\r\n     */\r\n    private int members;\r\n\r\n    /**\r\n     * no of senders\r\n     */\r\n   
 private int senders;\r\n\r\n    /**\r\n     * initial state\r\n     */\r\n    private Boolean initial;\r\n\r\n    /**\r\n     * is sender ?\r\n     */\r\n    private Boolean isSender;\r\n\r\n    /**\r\n     *   True if session instantiator requested a close.\r\n     */\r\n    public boolean isByeRequested = false;\r\n\r\n    /**\r\n     *   Time this source last sent an RTP Packet\r\n     */\r\n    public double timeOfLastRTPSent = 0;\r\n\r\n    /**\r\n    * The last time an RTCP packet was transmitted.\r\n    */\r\n    public double timeOfLastRTCPSent = 0;\r\n\r\n    /**\r\n     * The startup time for the application.\r\n     */\r\n    public long appStartupTime;\r\n\r\n    /**\r\n     * Ramdomized time interval for next RTCP transmission.\r\n     */\r\n    public double T = 0;\r\n\r\n    /**\r\n     * Synchronization Source identifier for this source.\r\n     */\r\n    public int SSRC;\r\n\r\n    /**\r\n     * RTP Source\r\n     */\r\n    RtpSource rtpSource;\r\n\r\n    /**\r\n     * The current time.\r\n     */\r\n    public double tc = 0;\r\n\r\n    /**\r\n     * Total Number of RTP data packets sent out by this source since starting transmission.\r\n     */\r\n    public long packetCount;\r\n\r\n    /**\r\n    * Total Number of payload octets (i.e not including header or padding)\r\n    * sent out by this source since starting transmission.\r\n    */\r\n    public long octetCount;\r\n\r\n    /**\r\n     * Initialize the Random Number Generator.\r\n     */\r\n    private Random rnd = new Random();\r\n\r\n    /**\r\n     * Constructor.\r\n     *\r\n     * @param isSender is sender\r\n     * @param bandwidth bandwidth (can set 16000 (16kops 128kbps))\r\n     */\r\n    public RtcpSession(boolean isSender, double bandwidth) {\r\n        this.isSender = isSender;\r\n        members = 2;\r\n        senders = 1;\r\n        this.bandwidth = bandwidth;\r\n        rtcp_bandwidth = 0.05 * bandwidth;\r\n        rtcp_min_time = RTCP_MIN_TIME;\r\n        avgrtcpsize = 
128;\r\n        initial = true;\r\n\r\n        // Initialize the Session level variables\r\n        appStartupTime = currentTime();\r\n        timeOfLastRTCPSent = appStartupTime;\r\n        tc = appStartupTime;\r\n        SSRC = rnd.nextInt();\r\n        packetCount = 0;\r\n        octetCount = 0;\r\n\r\n        // Init RTP source\r\n        rtpSource = new RtpSource(SSRC);\r\n    }\r\n\r\n    /**\r\n     * Setter of members\r\n     *\r\n     * @param members no of members\r\n     */\r\n    public void setMembers(int members) {\r\n        this.members = members;\r\n    }\r\n\r\n    /**\r\n     * Setter of senders\r\n     *\r\n     * @param senders no of senders\r\n     */\r\n    public void setSenders(int senders) {\r\n        this.senders = senders;\r\n    }\r\n\r\n    /**\r\n     * Get the interval of RTCP message\r\n     *\r\n     * @return interval\r\n     */\r\n    public double getReportInterval() {\r\n        // Interval\r\n        double t;\r\n        // no. of members for computation\r\n        double n;\r\n\r\n        // initial half the min delay for quicker notification\r\n        if (initial) {\r\n            initial = false;\r\n            rtcp_min_time /= 2;\r\n        }\r\n\r\n        // If there were active senders, give them at least a minimum share of\r\n        // the RTCP bandwidth. 
Otherwise all participants share the RTCP\r\n        // bandwidth equally.\r\n        n = members;\r\n        if (senders > 0 && senders < members * RTCP_SENDER_BW_FRACTION) {\r\n            if (isSender) {\r\n                rtcp_bandwidth *= RTCP_SENDER_BW_FRACTION;\r\n                n = senders;\r\n            } else {\r\n                rtcp_bandwidth *= RTCP_RCVR_BW_FRACTION;\r\n                n -= senders;\r\n            }\r\n        }\r\n\r\n        // get interval\r\n        t = (double)avgrtcpsize * n / bandwidth;\r\n        if (t < rtcp_min_time)\r\n            t = rtcp_min_time;\r\n\r\n        // add noise to avoid traffic bursts\r\n        t *= (Math.random() + 0.5);\r\n\r\n        T = t;\r\n        return t;\r\n    }\r\n\r\n    /**\r\n     * Update the average RTCP packet size\r\n     *\r\n     * @param size\r\n     */\r\n    public void updateavgrtcpsize(int size) {\r\n        avgrtcpsize = (int)(0.0625 * (double)size + 0.9375 * (double)avgrtcpsize);\r\n    }\r\n\r\n    /**\r\n     * Returns a self source object.\r\n     *\r\n     * @return My source object.\r\n     */\r\n    public RtpSource getMySource() {\r\n        return rtpSource;\r\n    }\r\n\r\n    /**\r\n     * Returns current time from the Date().getTime() function.\r\n     *\r\n     * @return The current time.\r\n     */\r\n    public long currentTime() {\r\n        tc = (new Date()).getTime();\r\n        return (long)tc;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpStatisticsReceiver.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP packet statistics receiver\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpStatisticsReceiver {\r\n\t/**\r\n\t * Number of RTCP packets received\r\n\t */\r\n\tpublic int numRtcpPkts = 0;\r\n\t\r\n\t/**\r\n\t * Number of RTCP bytes received\r\n\t */\r\n\tpublic int numRtcpBytes = 0;\r\n\r\n\t/**\r\n\t * Number of RTCP SR packets received\r\n\t */\r\n\tpublic int numSrPkts = 0;\r\n\t\r\n\t/**\r\n\t * Number of bad RTCP packets received\r\n\t */\r\n\tpublic int numBadRtcpPkts = 0;\r\n\t\r\n\t/**\r\n\t * Number of unknown RTCP packets received\r\n\t */\r\n\tpublic int numUnknownTypes = 0;\r\n\t\r\n\t/**\r\n\t * Number of malformed RTCP packets received\r\n\t */\r\n\tpublic int numMalformedRtcpPkts = 0;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtcpStatisticsTransmitter.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTCP packet statistics transmitter\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpStatisticsTransmitter {\r\n\t/**\r\n\t * Total number of packets sent\r\n\t */\r\n\tpublic int numPackets = 0;\r\n    \r\n\t/**\r\n\t * Total number of bytes sent\r\n\t */\r\n\tpublic int numBytes = 0;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacket.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.ByteArrayOutputStream;\r\nimport java.io.DataOutputStream;\r\nimport java.io.IOException;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\r\n\r\n/**\r\n * Abstract RTP packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtpPacket extends Packet {\r\n\tpublic Packet base;\r\n\tpublic int marker;\r\n\tpublic int payloadType;\r\n\tpublic int seqnum;\r\n\tpublic long timestamp;\r\n\tpublic int ssrc;\r\n\tpublic int payloadoffset;\r\n\tpublic int payloadlength;\r\n\r\n\tpublic RtpPacket() {\n\t\tsuper();\r\n\t}\r\n\r\n\tpublic RtpPacket(Packet packet) {\r\n\t\tsuper(packet);\r\n\t\t\r\n\t\tbase = packet;\r\n\t}\r\n\r\n\tpublic void assemble(int length) throws IOException {\r\n\t\tthis.length = length;\r\n\t\tthis.offset = 0;\r\n\r\n\t\tByteArrayOutputStream bytearrayoutputstream = new ByteArrayOutputStream(length);\r\n\t\tDataOutputStream dataoutputstream = new DataOutputStream(bytearrayoutputstream);\r\n\t\tdataoutputstream.writeByte(128);\r\n\t\tint i = payloadType;\r\n\t\tif (marker == 1) {\r\n\t\t\ti = payloadType | 
0x80;\r\n\t\t}\r\n\t\tdataoutputstream.writeByte((byte) i);\r\n\t\tdataoutputstream.writeShort(seqnum);\r\n\t\tdataoutputstream.writeInt((int) timestamp);\r\n\t\tdataoutputstream.writeInt(ssrc);\r\n\t\tdataoutputstream.write(base.data, payloadoffset, payloadlength);\r\n\t\tdata = bytearrayoutputstream.toByteArray();\r\n\t}\r\n\r\n\tpublic int calcLength() {\r\n\t\treturn payloadlength + 12;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacketReceiver.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.IOException;\n\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\n/**\n * RTP packet receiver\n *\n * @author jexa7410\n */\r\npublic class RtpPacketReceiver {\n\t/**\n\t * Max datagram packet size\n\t */\n\tprivate static int DEFAULT_DATAGRAM_SIZE = 4096;\t\n\n    /**\n     * Statistics\n     */\r\n\tprivate RtpStatisticsReceiver stats = new RtpStatisticsReceiver();\r\n\r\n\t/**\n     * Flag that indicates if the received buffer size has been set or not\n     */\r\n\tprivate boolean recvBufSizeSet = false;\r\n\r\n\t/**\n     * Buffer size needed to received RTP packet\n     */\r\n\tprivate int bufferSize = DEFAULT_DATAGRAM_SIZE;\r\n\r\n\t/**\r\n\t * Datagram connection\r\n\t */\r\n    public DatagramConnection datagramConnection = null;\r\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession rtcpSession = null;\n\r\n\t/**\r\n\t * The logger\r\n\t */\r\n\tprivate Logger logger = Logger.getLogger(this.getClass().getName());\n\n    
/**\n     * Constructor\n     *\n     * @param port Listenning port\n     * @throws IOException\n     */\r\n    public RtpPacketReceiver(int port, RtcpSession rtcpSession) throws IOException {\n        this.rtcpSession = rtcpSession;\n        // Create the UDP server\r\n        datagramConnection = NetworkFactory.getFactory().createDatagramConnection();\n        datagramConnection.open(port);\r\n\t\tif (logger.isActivated()) {\r\n            logger.debug(\"RTP receiver created on port \" + port);\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Close the receiver\r\n\t */\r\n\tpublic void close() {\n\t\t// Close the datagram connection\r\n\t\tif (datagramConnection != null) {\r\n\t\t\ttry {\n\t\t\t\tdatagramConnection.close();\n\t\t\t} catch(Exception e) {\n\t\t\t\tif (logger.isActivated()) {\n\t\t\t\t\tlogger.warn(\"Can't close correctly the datagram connection\");\n\t\t\t\t}\n\t\t\t}\r\n\t\t\tdatagramConnection = null;\r\n\t\t}\n\r\n\t}\n\n    /**\n     * Read a RTP packet (blocking method)\n     *\n     * @return RTP packet\n     */\r\n\tpublic RtpPacket readRtpPacket() {\r\n\t\ttry {\r\n\t\t\t// Wait a new packet\r\n            byte[] data = datagramConnection.receive(bufferSize);\r\n\r\n\t\t\t// Parse the RTP packet\r\n\t\t\tRtpPacket pkt = parseRtpPacket(data);\r\n\t\t\tif (pkt.payloadType != 12) {\r\n\t\t\t\t// Update statistics\r\n\t\t\t\tstats.numPackets++;\r\n                stats.numBytes += data.length;\n\n                RtpSource s = rtcpSession.getMySource();\n                s.activeSender = true;\n                s.timeOfLastRTPArrival = rtcpSession.currentTime();\n                s.updateSeq(pkt.seqnum);\n                if (s.noOfRTPPacketsRcvd == 0)\n                    s.base_seq = pkt.seqnum;\n                s.noOfRTPPacketsRcvd++;\n\r\n\t\t\t\treturn pkt;\r\n\t\t\t} else {\r\n\t\t\t\t// Drop the keep-alive packets (payload 12)\r\n\t\t\t\treturn readRtpPacket();\r\n\t\t\t}\r\n\r\n\t\t} catch (Exception e) {\r\n\t\t\tif (logger.isActivated()) 
{\r\n\t\t\t\tlogger.error(\"Can't parse the RTP packet\", e);\r\n\t\t\t}\r\n\t\t\tstats.numBadRtpPkts++;\r\n\t\t\treturn null;\r\n\t\t}\r\n\t}\n\n    /**\n     * Set the size of the received buffer\n     *\n     * @param size New buffer size\n     */\r\n    public void setRecvBufSize(int size) {\r\n    \tthis.bufferSize = size;\r\n    }\n\n    /**\n     * Parse the RTP packet\n     *\n     * @param data RTP packet not yet parsed\n     * @return RTP packet\n     */\r\n\tprivate RtpPacket parseRtpPacket(byte[] data) {\r\n\t\tRtpPacket packet = new RtpPacket();\r\n\t\ttry {\r\n\t\t\t// Read RTP packet length\r\n            packet.length = data.length;\r\n\n            // Set received timestamp\n            packet.receivedAt = System.currentTimeMillis();\n\r\n\t\t\t// Read marker\r\n\t\t\tif ((byte)((data[1] & 0xff) & 0x80) == (byte) 0x80){\r\n\t\t\t\tpacket.marker = 1;\r\n\t\t\t}else{\r\n\t\t\t\tpacket.marker = 0;\r\n\t\t\t}\r\n\r\n\t\t\t// Read payload type\r\n\t\t\tpacket.payloadType = (byte) ((data[1] & 0xff) & 0x7f);\r\n\r\n\t\t\t// Read seq number\r\n\t\t\tpacket.seqnum = (short)((data[2] << 8) | (data[3] & 0xff));\r\n\r\n\t\t\t// Read timestamp\r\n\t\t\tpacket.timestamp = (((data[4] & 0xff) << 24) | ((data[5] & 0xff) << 16)\r\n\t\t\t\t\t| ((data[6] & 0xff) << 8) | (data[7] & 0xff));\r\n\r\n\t\t\t// Read SSRC\r\n\t\t\tpacket.ssrc = (((data[8] & 0xff) << 24) | ((data[9] & 0xff) << 16)\r\n\t\t\t\t\t| ((data[10] & 0xff) << 8) | (data[11] & 0xff));\r\n\r\n\t\t\t// Read media data after the 12 byte header which is constant\r\n\t\t\tpacket.payloadoffset = 12;\r\n\t\t\tpacket.payloadlength = packet.length - packet.payloadoffset;\r\n\t\t\tpacket.data = new byte[packet.payloadlength];\r\n\t\t\tSystem.arraycopy(data, packet.payloadoffset, packet.data, 0, packet.payloadlength);\r\n\r\n\t\t\t// Update the buffer size\r\n\t\t\tif (!recvBufSizeSet) {\r\n\t\t\t\trecvBufSizeSet = true;\r\n\t\t\t\tswitch (packet.payloadType) {\r\n\t\t\t\t\tcase 14:\r\n\t\t\t\t\tcase 
26:\r\n\t\t\t\t\tcase 34:\r\n\t\t\t\t\tcase 42:\r\n\t\t\t\t\t\tsetRecvBufSize(64000);\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t\tcase 31:\r\n\t\t\t\t\t\tsetRecvBufSize(0x1f400);\r\n                        break;\r\n\t\t\t\t\tcase 32:\r\n\t\t\t\t\t\tsetRecvBufSize(0x1f400);\r\n\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\tdefault:\r\n\t\t\t\t\t\tif ((packet.payloadType >= 96) && (packet.payloadType <= 127)) {\r\n\t\t\t\t\t\t\tsetRecvBufSize(64000);\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n            }\r\n\t\t} catch (Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"RTP packet parsing error\", e);\r\n\t\t\t}\r\n\t\t\treturn null;\r\n\t\t}\r\n        return packet;\r\n\t}\n\n    /**\n     * Returns the statistics of RTP reception\n     *\n     * @return Statistics\n     */\r\n\tpublic RtpStatisticsReceiver getRtpReceptionStats() {\r\n\t\treturn stats;\r\n\t}\n\n    /**\n     * Returns the DatagramConnection of RTP\n     *\n     * @return DatagramConnection\n     */\n    public DatagramConnection getConnection() {\n        return datagramConnection;\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpPacketTransmitter.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\nimport java.io.IOException;\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport de.kp.net.rtp.RtpSender;\n\n/**\n * RTP packet transmitter\n *\n * @author jexa7410\n */\r\npublic class RtpPacketTransmitter {\r\n\r\n    /**\r\n     * Sequence number\r\n     */\r\n\tprivate int seqNumber = 0;\r\n\r\n    /**\r\n\t * Remote address\r\n\t */\r\n\tprivate String remoteAddress;\r\n\r\n    /**\r\n\t * Remote port\r\n\t */\r\n\tprivate int remotePort;\n\r\n\t/**\r\n\t * Statistics\r\n\t */\r\n\tprivate RtpStatisticsTransmitter stats = new RtpStatisticsTransmitter();\r\n\r\n\t/**\r\n\t * Datagram connection\r\n\t */\r\n\tprivate DatagramConnection datagramConnection = null;\r\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession rtcpSession = null;\n\r\n\t/**\r\n\t * The logger\r\n\t */\r\n\tprivate final Logger logger = 
Logger.getLogger(this.getClass().getName());\n\n\n\t// TODO: use Transmitter for its buildRtpPacket functionality\n    public RtpPacketTransmitter(RtcpSession rtcpSession) {\n        this.rtcpSession = rtcpSession;\n\t\tif (logger.isActivated()) {\n            logger.debug(\"RTP broadcast transmitter initiated with SSCR: \" + this.rtcpSession.SSRC);\n\t\t}\n\n    }\n\n\t\n\t/**\n     * Constructor\n     *\n     * @param address Remote address\n     * @param port Remote port\n     * @throws IOException\n     */\r\n    public RtpPacketTransmitter(String address, int port, RtcpSession rtcpSession)\n            throws IOException {\r\n\t\tthis.remoteAddress = address;\r\n\t\tthis.remotePort = port;\n        this.rtcpSession = rtcpSession;\r\n        datagramConnection = NetworkFactory.getFactory().createDatagramConnection();\n        datagramConnection.open();\r\n\t\tif (logger.isActivated()) {\r\n            logger.debug(\"RTP transmitter connected to \" + remoteAddress + \":\" + remotePort);\r\n\t\t}\r\n\t}\n\n    /**\n     * Constructor - used for SYMETRIC_RTP\n     *\n     * @param address Remote address\n     * @param port Remote port\n     * @param DatagramConnection datagram connection of the RtpPacketReceiver\n     * @throws IOException\n     */\n    public RtpPacketTransmitter(String address, int port, RtcpSession rtcpSession,\n            DatagramConnection connection)\n            throws IOException {\n        this.remoteAddress = address;\n        this.remotePort = port;\n        this.rtcpSession = rtcpSession;\n        if (connection != null) {\n            this.datagramConnection = connection;\n        } else {\n            this.datagramConnection = NetworkFactory.getFactory().createDatagramConnection();\n            this.datagramConnection.open();\n        }\n\n        if (logger.isActivated()) {\n            logger.debug(\"RTP transmitter connected to \" + remoteAddress + \":\" + remotePort);\n        }\n    }\n\n    /**\n     * Close the transmitter\n  
   *\n     * @throws IOException\n     */\r\n\tpublic void close() throws IOException {\r\n\t\t// Close the datagram connection\r\n\t\tif (datagramConnection != null) {\r\n\t\t\tdatagramConnection.close();\r\n\t\t}\r\n\t\tif (logger.isActivated()) {\r\n            logger.debug(\"RTP transmitter closed\");\r\n\t\t}\r\n\t}\r\n\r\n    /**\n     * Send a RTP packet\n     *\n     * @param buffer Input buffer\n     * @throws IOException\n     */\r\n\tpublic void sendRtpPacket(Buffer buffer) throws IOException {\r\n\t\t// Build a RTP packet\r\n    \tRtpPacket packet = buildRtpPacket(buffer);\r\n    \tif (packet == null) {\r\n    \t\treturn;\r\n    \t}\r\n\r\n    \t// Assemble RTP packet\r\n    \tint size = packet.calcLength();\r\n    \tpacket.assemble(size);\r\n\r\n    \t// Send the RTP packet to the remote destination\r\n    \ttransmit(packet);\r\n    }\n\n    /**\n     * Build a RTP packet\n     *\n     * @param buffer Input buffer\n     * @return RTP packet\n     */\r\n\tprivate RtpPacket buildRtpPacket(Buffer buffer) {\r\n\t\tbyte data[] = (byte[])buffer.getData();\r\n\t\tif (data == null) {\r\n\t\t\treturn null;\r\n\t\t}\r\n\t\tPacket packet = new Packet();\r\n\t\tpacket.data = data;\r\n\t\tpacket.offset = 0;\r\n\t\tpacket.length = buffer.getLength();\r\n\r\n\t\tRtpPacket rtppacket = new RtpPacket(packet);\r\n\t\tif ((buffer.getFlags() & 0x800) != 0) {\r\n\t\t\trtppacket.marker = 1;\r\n\t\t} else {\r\n\t\t\trtppacket.marker = 0;\r\n\t\t}\r\n\r\n\t\trtppacket.payloadType = buffer.getFormat().getPayload();\r\n\t\trtppacket.seqnum = seqNumber++;\r\n\t\trtppacket.timestamp = buffer.getTimeStamp();\r\n        rtppacket.ssrc = rtcpSession.SSRC;\r\n\t\trtppacket.payloadoffset = buffer.getOffset();\r\n\t\trtppacket.payloadlength = buffer.getLength();\r\n\t\treturn rtppacket;\r\n\t}\n\n    /**\n     * Transmit a RTCP compound packet to the remote destination\n     *\n     * @param packet RTP packet\n     * @throws IOException\n     */\r\n\tprivate void transmit(Packet packet) 
{\r\n\t\t// Prepare data to be sent\r\n\t\tbyte[] data = packet.data;\n\t\t\n\t\tif (packet.offset > 0) {\n\t\t\tSystem.arraycopy(data, packet.offset, data = new byte[packet.length], 0, packet.length);\n\t\t}\n\n\t\t// broadcast data\n    \ttry {\n\t\t\tRtpSender.getInstance().send(data);\n\t\t} catch (IOException e1) {\n\t\t\t// TODO Auto-generated catch block\n\t\t\te1.printStackTrace();\n\t\t\tif (logger.isActivated()) {\n\t\t\t\tlogger.error(\"Can't broadcast the RTP packet\", e1);\n\t\t\t}\n\t\t}\r\n\r\n//\t\t// Update statistics\r\n//\t\tstats.numBytes += packet.length;\r\n//\t\tstats.numPackets++;\r\n//\r\n//\t\t// Send data over UDP\r\n//\t\ttry {\r\n//\t\t\tdatagramConnection.send(remoteAddress, remotePort, data);\n//\n//            RtpSource s = rtcpSession.getMySource();\n//            s.activeSender = true;\n//            rtcpSession.timeOfLastRTPSent = rtcpSession.currentTime();\n//            rtcpSession.packetCount++;\n//            rtcpSession.octetCount += data.length;\n//\t\t} catch (IOException e) {\r\n//\t\t\tif (logger.isActivated()) {\r\n//\t\t\t\tlogger.error(\"Can't send the RTP packet\", e);\r\n//\t\t\t}\r\n//        }\n    }\r\n\r\n    /**\n     * Returns the statistics of RTP transmission\n     *\n     * @return Statistics\n     */\r\n\tpublic RtpStatisticsTransmitter getStatistics() {\r\n\t\treturn stats;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpSource.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\n\nimport java.util.Date;\n\n/**\n * RTP source\n *\n * @author jexa7410\n */\r\npublic class RtpSource {\r\n\t/**\r\n\t * CNAME value\r\n\t */\r\n    public static String CNAME = \"anonymous@127.0.0.1\";\r\n\r\n\t/**\r\n\t * SSRC\r\n\t */\r\n    public int SSRC;\n\n    /**\n     * Fraction of RTP data packets from source SSRC lost since the previous\n     * SR or RR packet was sent, expressed as a fixed point number with the\n     * binary point at the left edge of the field.  
To get the actual fraction\n     * multiply by 256 and take the integral part\n     */\n    public double fraction;\n\n    /**\n     * Cumulative number of packets lost (signed 24bits).\n     */\n    public long lost;\n\n    /**\n     * Extended highest sequence number received.\n     */\n    public long last_seq;\n\n    /**\n     * Interarrival jitter.\n     */\n    public long jitter;\n\n    /**\n     * Last SR Packet from this source.\n     */\n    public long lst;\n\n    /**\n     * Delay since last SR packet.\n     */\n    public double dlsr;\n\n    /**\n     * Is this source an ActiveSender.\n     */\n    public boolean activeSender;\n\n    /**\n     * Time the last RTCP Packet was received from this source.\n     */\n    public double timeOfLastRTCPArrival;\n\n    /**\n     * Time the last RTP Packet was received from this source.\n     */\n    public double timeOfLastRTPArrival;\n\n    /**\n     * Time the last Sender Report RTCP Packet was received from this source.\n     */\n    public double timeofLastSRRcvd;\n\n    /**\n     * Total Number of RTP Packets Received from this source\n     */\n    public int noOfRTPPacketsRcvd;\n\n    /**\n     * Sequence Number of the first RTP packet received from this source\n     */\n    public long base_seq;\n\n    /**\n     * Number of RTP Packets Expected from this source\n     */\n    public long expected;\n\n    /**\n     * No of  RTP Packets expected last time a Reception Report was sent\n     */\n    public long expected_prior;\n\n    /**\n     * No of  RTP Packets received last time a Reception Report was sent\n     */\n    public long received_prior;\n\n    /**\n     * Highest Sequence number received from this source\n     */\n    public long max_seq;\n\n    /**\n     * Keep track of the wrapping around of RTP sequence numbers, since RTP Seq No. 
are\n     * only 16 bits\n     */\n    public long cycles;\n\n    /**\n     * Since Packets lost is a 24 bit number, it should be clamped at WRAPMAX = 0xFFFFFFFF\n     */\n    public long WRAPMAX = 0xFFFFFFFF;\n\n    /**\n     * Constructor requires an SSRC for it to be a valid source. The constructor initializes\n     * all the source class members to a default value\n     *\n     * @param   sourceSSRC SSRC of the new source\n     */\n    RtpSource(int sourceSSRC) {\n        long time = currentTime();\n        SSRC = sourceSSRC;\n        fraction = 0;\n        lost = 0;\n        last_seq = 0;\n        jitter = 0;\n        lst = 0;\n        dlsr = 0;\n        activeSender = false;\n        timeOfLastRTCPArrival = time;\n        timeOfLastRTPArrival = time;\n        timeofLastSRRcvd = time;\n        noOfRTPPacketsRcvd = 0;\n        base_seq = 0;\n        expected_prior = 0;\n        received_prior = 0;\n    }\n\n    /**\n     * Returns the extended maximum sequence for a source\n     * considering that sequences cycle.\n     *\n     * @return  Sequence Number\n     */\n\n    public long getExtendedMax() {\n        return (cycles + max_seq);\n    }\n\n    /**\n     * This safe sequence update function will try to\n     * determine if seq has wrapped over resulting in a\n     * new cycle.  It sets the cycle -- source level\n     * variable which keeps track of wraparounds.\n     *\n     * @param seq  Sequence Number\n     */\n    public void updateSeq(long seq) {\n        // If the difference between max_seq and seq\n        // is more than 1000, then we can assume that\n        // cycle has wrapped around.\n        if (max_seq == 0)\n            max_seq = seq;\n        else {\n            if (max_seq - seq > 0.5 * WRAPMAX)\n                cycles += WRAPMAX;\n\n            max_seq = seq;\n        }\n\n    }\n\n    /**\n     * Updates the various statistics for this source e.g. 
Packets Lost, Fraction lost\n     * Delay since last SR etc, according to the data gathered since a last SR or RR was sent out.\n     * This method is called prior to sending a Sender Report (SR) or a Receiver Report (RR)\n     * which will include a Reception Report block about this source.\n     */\n    public int updateStatistics() {\n        // Set all the relevant parameters\n\n        // Calculate the highest sequence number received in an RTP Data Packet\n        // from this source\n        last_seq = getExtendedMax();\n\n        // Number of Packets lost = Number of Packets expected - Number of\n        // Packets actually rcvd\n        expected = getExtendedMax() - base_seq + 1;\n        lost = expected - noOfRTPPacketsRcvd;\n\n        // Clamping at 0xffffff\n        if (lost > 0xffffff)\n            lost = 0xffffff;\n\n        // Calculate the fraction lost\n        long expected_interval = expected - expected_prior;\n        expected_prior = expected;\n\n        long received_interval = noOfRTPPacketsRcvd - received_prior;\n        received_prior = noOfRTPPacketsRcvd;\n\n        long lost_interval = expected_interval - received_interval;\n\n        if (expected_interval == 0 || lost_interval <= 0)\n            fraction = 0;\n        else\n            fraction = (lost_interval << 8) / (double) expected_interval;\n\n        // dlsr - express it in units of 1/65536 seconds\n        dlsr = (timeofLastSRRcvd - currentTime()) / 65536;\n\n        return 0;\n    }\n\n    /**\n     * Returns current time from the Date().getTime() function.\n     *\n     * @return The current time.\n     */\n    private static long currentTime() {\n        return (long)((new Date()).getTime());\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpStatisticsReceiver.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTP statistics receiver\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtpStatisticsReceiver {\r\n\t/**\r\n\t * Number of RTP packets received\r\n\t */\r\n\tpublic int numPackets = 0;\r\n\t\r\n\t/**\r\n\t * Number of RTP bytes received\r\n\t */\r\n\tpublic int numBytes = 0;\r\n\t\r\n\t/**\r\n\t * Number of bad RTP packet received\r\n\t */\r\n\tpublic int numBadRtpPkts = 0;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/core/RtpStatisticsTransmitter.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.core;\r\n\r\n/**\r\n * RTP statistics transmitter\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtpStatisticsTransmitter {\r\n\t/**\r\n\t * Total number of packets sent\r\n\t */\r\n\tpublic int numPackets = 0;\r\n    \r\n\t/**\r\n\t * Total number of bytes sent\r\n\t */\r\n\tpublic int numBytes = 0;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpApplicationEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpAppPacket;\r\n\r\n/**\r\n * RTCP application event\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpApplicationEvent extends RtcpEvent {\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP APP packet\r\n\t */\r\n\tpublic RtcpApplicationEvent(RtcpAppPacket packet) {\r\n\t\tsuper(packet);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpByeEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpByePacket;\r\n\r\n/**\r\n * RTCP bye event\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpByeEvent extends RtcpEvent {\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP BYE packet\r\n\t */\r\n\tpublic RtcpByeEvent(RtcpByePacket packet) {\r\n\t\tsuper(packet);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpPacket;\r\n\r\n/**\r\n * Abstract RTCP event\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class RtcpEvent {\r\n\t/**\r\n\t * RTCP packet\r\n\t */\r\n\tprivate RtcpPacket packet;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP packet\r\n\t */\r\n\tpublic RtcpEvent(RtcpPacket packet) {\r\n\t\tthis.packet = packet;\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the RTCP packet\r\n\t * \r\n\t * @return Packet\r\n\t */\r\n\tpublic RtcpPacket getPacket() {\r\n\t\treturn packet;\r\n\t}\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpEventListener.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\n/**\r\n * RTCP events listener interface\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface RtcpEventListener {\r\n\t/**\r\n\t * Receive RTCP event\r\n\t * \r\n\t * @param event RTCP event\r\n\t */\r\n\tvoid receiveRtcpEvent(RtcpEvent event);\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpReceiverReportEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpReceiverReportPacket;\r\n\r\n/**\r\n * RTCP receiver report event\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpReceiverReportEvent extends RtcpEvent {\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP RR packet\r\n\t */\r\n\tpublic RtcpReceiverReportEvent(RtcpReceiverReportPacket packet) {\r\n\t\tsuper(packet);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpSdesEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSdesPacket;\r\n\r\n/**\r\n * RTCP session description event\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSdesEvent extends RtcpEvent {\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP SDES packet\r\n\t */\r\n\tpublic RtcpSdesEvent(RtcpSdesPacket packet) {\r\n\t\tsuper(packet);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/event/RtcpSenderReportEvent.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.event;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSenderReportPacket;\r\n\r\n/**\r\n * RTCP sender report event\r\n * \r\n * @author jexa7410\r\n */\r\npublic class RtcpSenderReportEvent extends RtcpEvent {\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet RTCP SR packet\r\n\t */\r\n\tpublic RtcpSenderReportEvent(RtcpSenderReportPacket packet) {\r\n\t\tsuper(packet);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/DummyFormat.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format;\r\n\r\n/**\r\n * Dummy format \r\n * \r\n * @author jexa7410\r\n */\r\npublic class DummyFormat extends Format {\r\n\t\r\n\t/**\r\n\t * Encoding name\r\n\t */\r\n\tpublic static final String ENCODING = \"dummy\";\r\n\t\r\n\t/**\r\n\t * Payload type\r\n\t */\r\n\tpublic static final int PAYLOAD = 12;\r\n\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic DummyFormat() {\r\n\t\tsuper(ENCODING, PAYLOAD);\r\n\t}\r\n\r\n\t/**\r\n\t * Get the size of a chunk of data from the source\r\n\t * \r\n\t * @return The minimum size of the buffer needed to read a chunk of data\r\n\t */\r\n    public int getDataChunkSize() {\r\n    \treturn 0;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/Format.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format;\r\n\r\n/**\r\n * Abstract format\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class Format {\r\n\t/**\r\n\t * Unknown payload\r\n\t */\r\n    public static final int UNKNOWN_PAYLOAD = -1;\r\n\r\n    /**\r\n     * Codec\r\n     */\r\n    private String codec;\r\n\r\n\t/**\r\n     * Payload type\r\n     */\r\n    private int payload;\r\n\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param codec Codec\r\n     * @param payload Payload type\r\n     */\r\n    public Format(String codec, int payload) {\r\n    \tthis.codec = codec;\t\r\n    \tthis.payload = payload;\r\n    }\r\n\r\n    /**\r\n     * Get the codec name\r\n     *\r\n     * @return Name \r\n     */\r\n    public String getCodec() {\r\n    \treturn codec;\r\n    }\r\n\r\n    /**\r\n     * Get the type of payload\r\n     * \r\n     * @return Payload type\r\n     */\r\n    public int getPayload() {\r\n    \treturn payload;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/audio/AudioFormat.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format.audio;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\r\n\r\n/**\r\n * Audio format\r\n */\r\npublic class AudioFormat extends Format {\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param codec Codec\r\n     * @param payload Payload type\r\n     */\r\n    public AudioFormat(String codec, int payload) {\r\n    \tsuper(codec, payload);\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/audio/PcmuAudioFormat.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format.audio;\r\n\r\n/**\r\n * G711 PCMU audio format\r\n * \r\n * @author jexa7410\r\n */\r\npublic class PcmuAudioFormat extends AudioFormat {\r\n\r\n\t/**\r\n\t * Encoding name\r\n\t */\r\n\tpublic static final String ENCODING = \"pcmu\";\r\n\t\r\n\t/**\r\n\t * Payload type\r\n\t */\r\n\tpublic static final int PAYLOAD = 0;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic PcmuAudioFormat() {\r\n\t\tsuper(ENCODING, PAYLOAD);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/H263VideoFormat.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format.video;\n\n/**\n * H263-2000 (h263++) video format\n * \n * @author jexa7410\n */\r\npublic class H263VideoFormat extends VideoFormat {\r\n\r\n\t/**\r\n\t * Encoding name\r\n\t */\r\n\tpublic static final String ENCODING = \"h263-2000\";\r\n\r\n\t/**\r\n\t * Payload type\r\n\t */\r\n    public static final int PAYLOAD = 97;\n\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic H263VideoFormat() {\r\n\t\tsuper(ENCODING, PAYLOAD);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/H264VideoFormat.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format.video;\r\n\r\n/**\r\n * H264 video format\r\n * \r\n * @author jexa7410\r\n */\r\npublic class H264VideoFormat extends VideoFormat {\r\n\r\n\t/**\r\n\t * Encoding name\r\n\t */\r\n\tpublic static final String ENCODING = \"h264\";\r\n\t\r\n\t/**\r\n\t * Payload type\r\n\t */\r\n\tpublic static final int PAYLOAD = 96;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic H264VideoFormat() {\r\n\t\tsuper(ENCODING, PAYLOAD);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/format/video/VideoFormat.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.format.video;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\r\n\r\n/**\r\n * Video format\r\n */\r\npublic class VideoFormat extends Format {\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param codec Codec\r\n     * @param payload Payload type\r\n     */\r\n    public VideoFormat(String codec, int payload) {\r\n    \tsuper(codec, payload);\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaException.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.media;\r\n\r\n/**\r\n * Media exception\r\n * \r\n * @author JM. Auffret\r\n */\r\npublic class MediaException extends java.lang.Exception {\r\n\tstatic final long serialVersionUID = 1L;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t *\r\n\t * @param error Error message\r\n\t */\r\n\tpublic MediaException(String error) {\r\n\t\tsuper(error);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaInput.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.media;\r\n\r\n/**\r\n * Media input (e.g. camera, microphone)\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface MediaInput {\n\t/**\r\n\t * Open the player\r\n\t * \n\t * @throws MediaException\r\n\t */\r\n\tpublic void open() throws MediaException;\r\n\t\r\n\t/**\r\n\t * Close the player\r\n\t */\r\n\tpublic void close();\r\n\r\n\t/**\n\t * Read a media sample (blocking method)\n\t * \n\t * @return Media sample\n\t * @throws MediaException\n\t */\n\tpublic MediaSample readSample() throws MediaException;\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaOutput.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.media;\r\n\r\n\r\n/**\r\n * Media output (e.g. screen, headset)\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface MediaOutput {\r\n\t/**\r\n\t * Open the renderer\r\n\t * \n\t * @throws MediaException\n\t */\r\n\tpublic void open() throws MediaException;\r\n\t\r\n\t/**\r\n\t * Close the renderer\r\n\t */\r\n\tpublic void close();\r\n\r\n\t/**\r\n\t * Write a media sample\r\n\t * \r\n\t * @param sample Media sample\r\n\t * @throws MediaException\r\n\t */\r\n\tpublic void writeSample(MediaSample sample) throws MediaException;\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/media/MediaSample.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.media;\r\n\r\n/**\r\n * Media sample\r\n * \r\n * @author jexa7410\r\n */\r\npublic class MediaSample {\r\n\r\n\t/**\r\n\t * Data\r\n\t */\r\n\tprivate byte[] data;\r\n\t\r\n\t/**\r\n\t * Time stamp\r\n\t */\r\n\tprivate long time;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param data Data\r\n\t * @param time Time stamp\r\n\t */\r\n\tpublic MediaSample(byte[] data, long time) {\r\n\t\tthis.data = data;\r\n\t\tthis.time = time;\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the data sample\r\n\t * \r\n\t * @return Byte array\r\n\t */\r\n\tpublic byte[] getData() {\r\n\t\treturn data;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the length of the data sample\r\n\t * \r\n\t * @return Data sample length\r\n\t */\r\n\tpublic int getLength() {\r\n\t\tif (data != null) {\r\n\t\t\treturn data.length;\r\n\t\t} else {\r\n\t\t\treturn 0;\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the time stamp of the sample\r\n\t * \r\n\t * @return Time in microseconds\r\n\t */\r\n\tpublic long getTimeStamp() {\r\n\t\treturn time;\r\n\t}\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/DummyPacketSourceStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.DummyFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.SystemTimeBase;\nimport com.orangelabs.rcs.utils.FifoBuffer;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\r\n/**\r\n * Dummy packet source stream (used to pass NAT)\r\n * \r\n * @author jexa7410\r\n */\r\npublic class DummyPacketSourceStream extends Thread implements ProcessorInputStream {\r\n\t/**\r\n\t * Source period (in seconds)\r\n\t */\r\n\tpublic static int DUMMY_SOURCE_PERIOD = 15;\r\n\t\r\n\t/**\r\n\t * Input format\r\n\t */\r\n\tprivate DummyFormat format = new DummyFormat();\r\n \r\n    /**\r\n     * Time base\r\n     */\r\n    private SystemTimeBase systemTimeBase = new SystemTimeBase();\r\n\r\n    /**\r\n     * Sequence number\r\n     */\r\n    private long seqNo = 0;\r\n\r\n    /**\r\n     * Message buffer\r\n     */\r\n\tprivate FifoBuffer fifo = new FifoBuffer();\r\n\r\n\t/**\r\n     * The logger\r\n     */\r\n    
private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n    /**\r\n     * Interruption flag\r\n     */\r\n    private boolean interrupted = false;\r\n    \r\n    /**\r\n\t * Constructor\r\n\t */\r\n\tpublic DummyPacketSourceStream() {\r\n\t}\r\n    \r\n    /**\r\n\t * Open the input stream\r\n\t * \r\n     * @throws Exception\r\n\t */\t\r\n    public void open() throws Exception {\r\n    \tstart();\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Dummy source stream openned\");\r\n\t\t}\r\n\t}    \t\r\n\t\r\n    /**\r\n     * Close the input stream\r\n     */\r\n    public void close() {\r\n    \tinterrupted = true;\r\n    \ttry {\r\n    \t\tfifo.close();\r\n    \t} catch(Exception e) {\n            // Intentionally blank\n    \t}\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Dummy source stream closed\");\r\n\t\t}\r\n    }\r\n    \r\n    /**\r\n     * Format of the data provided by the source stream\r\n     * \r\n     * @return Format\r\n     */\r\n    public Format getFormat() {\r\n    \treturn format;\r\n    }\r\n    \r\n    /**\r\n     * Background processing\r\n     */\r\n    public void run() {\r\n    \twhile(!interrupted) {\r\n\t    \ttry {\r\n\t    \t\t// Build a new dummy packet\r\n\t    \t    Buffer packet = new Buffer();\r\n\t    \t    packet.setData(new byte[0]);   \t\r\n\t    \t    packet.setLength(0);\r\n\t    \t    packet.setFormat(format);\r\n\t    \t    packet.setSequenceNumber(seqNo++);\r\n\t    \t    packet.setFlags(Buffer.FLAG_SYSTEM_TIME | Buffer.FLAG_LIVE_DATA);\r\n\t        \tpacket.setTimeStamp(systemTimeBase.getTime());    \t    \t\r\n\r\n\t        \t// Post the packet in the FIFO\r\n\t        \tfifo.addObject(packet);\r\n\t        \t\r\n\t    \t\t// Make a pause\r\n\t    \t\tThread.sleep(DUMMY_SOURCE_PERIOD * 1000);\r\n\t    \t} catch(Exception e) {\r\n\t    \t\tif (logger.isActivated()) {\r\n\t    \t\t\tlogger.error(\"Dummy packet source has failed\", e);\r\n\t    \t\t}\r\n\t    \t}\r\n    \t}    
\r\n    }\r\n\r\n    /**\r\n     * Read from the stream\r\n     * \r\n     * @return Buffer\r\n     * @throws Exception\r\n     */\r\n    public Buffer read() throws Exception {\r\n    \t// Read the FIFO the buffer\t        \t    \t\r\n    \tBuffer buffer = (Buffer)fifo.getObject();  \r\n    \treturn buffer;  \r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/MediaCaptureStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaInput;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\r\n/**\r\n * Media capture stream\r\n * \r\n * @author jexa7410\r\n */\r\npublic class MediaCaptureStream implements ProcessorInputStream {\r\n\t/**\r\n     * Media player\r\n     */\r\n\tprivate MediaInput player;\r\n\r\n\t/**\r\n\t * Media format\r\n\t */\r\n\tprivate Format format;\r\n\t\r\n    /**\r\n     * Sequence number\r\n     */\r\n    private long seqNo = 0;\r\n\r\n    /**\r\n     * Input buffer\r\n     */\r\n\tprivate Buffer buffer = new Buffer();\r\n\r\n\t/**\r\n     * The logger\r\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n    /**\r\n\t * Constructor\r\n\t * \r\n\t * @param format Input format\r\n     * @param player Media player\r\n\t */\r\n    public MediaCaptureStream(Format format, MediaInput player) {\r\n    
\tthis.format = format;\r\n\t\tthis.player = player;\r\n\t}\r\n    \r\n    \r\n    /**\r\n\t * Open the input stream\r\n\t * \r\n     * @throws Exception\r\n\t */\t\r\n    public void open() throws Exception {\r\n    \ttry {\r\n\t    \tplayer.open();\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Media capture stream openned\");\r\n\t\t\t}\r\n    \t} catch(Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"Media capture stream failed\", e);\r\n\t\t\t}\r\n\t\t\tthrow e;\r\n    \t}\r\n\t}    \t\r\n\t\r\n    /**\r\n     * Close the input stream\r\n     */\r\n    public void close() {\r\n\t\tplayer.close();\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Media capture stream closed\");\r\n\t\t}\r\n    }\r\n    \r\n    /**\r\n     * Format of the data provided by the source stream\r\n     * \r\n     * @return Format\r\n     */\r\n    public Format getFormat() {\r\n    \treturn format;\r\n    }\r\n\r\n    /**\r\n     * Read from the stream\r\n     * \r\n     * @return Buffer\r\n     * @throws Exception\r\n     */\r\n    public Buffer read() throws Exception {\r\n    \t// Read a new sample from the media player\r\n    \tMediaSample sample = player.readSample();\r\n    \tif (sample == null) {\r\n    \t\treturn null;\r\n    \t}\r\n    \t\r\n    \t// Create a buffer\r\n\t    buffer.setData(sample.getData());   \t\r\n\t    buffer.setLength(sample.getLength());\r\n    \tbuffer.setFormat(format);\r\n    \tbuffer.setSequenceNumber(seqNo++);\r\n    \tbuffer.setFlags(Buffer.FLAG_SYSTEM_TIME | Buffer.FLAG_LIVE_DATA);\r\n    \tbuffer.setTimeStamp(sample.getTimeStamp());\r\n    \treturn buffer;  \r\n    }    \r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/MediaRendererStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\r\n/**\r\n * Media renderer stream \r\n * \r\n * @author jexa7410\r\n */\r\npublic class MediaRendererStream implements ProcessorOutputStream {\r\n\t/**\r\n     * Media renderer\r\n     */\r\n\tprivate MediaOutput renderer;\r\n    \r\n    /**\r\n\t * The logger\r\n\t */\r\n\tprivate final Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n     * @param renderer Media renderer\r\n\t */\r\n\tpublic MediaRendererStream(MediaOutput renderer) {\r\n\t\tthis.renderer = renderer;\r\n\t}\r\n\r\n\t/**\r\n\t * Open the output stream\r\n\t * \r\n     * @throws Exception\r\n\t */\t\r\n    public void open() throws Exception {\r\n    \ttry {\r\n\t    \trenderer.open();\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Media renderer stream openned\");\r\n\t\t\t}\r\n\t\t} 
catch(Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"Media renderer stream failed\", e);\r\n\t\t\t}\r\n\t\t\tthrow e; \r\n\t\t}\r\n    }\r\n\r\n    /**\r\n     * Close the output stream\r\n     */\r\n    public void close() {\r\n\t\trenderer.close();\r\n\t\tif (logger.isActivated()) {\r\n\t\t\tlogger.debug(\"Media renderer stream closed\");\r\n\t\t}    \t\r\n    }\r\n        \r\n    /**\r\n     * Write to the stream without blocking\r\n     * \r\n     * @param buffer Input buffer \r\n     * @throws Exception\r\n     */\r\n    public void write(Buffer buffer) throws Exception {\r\n    \tMediaSample sample = new MediaSample((byte[])buffer.getData(), buffer.getTimeStamp());\r\n    \trenderer.writeSample(sample);\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/ProcessorInputStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\r\n\r\n/**\r\n * Processor input stream\r\n */\r\npublic interface ProcessorInputStream {\r\n\r\n    /**\r\n\t * Open the input stream\r\n\t * \r\n     * @throws Exception\r\n\t */\t\r\n    public void open() throws Exception;\r\n\r\n    /**\r\n     * Close the input stream\r\n     */\r\n    public void close();\r\n    \r\n    /**\r\n     * Read from the input stream without blocking\r\n     * \r\n     * @return Buffer \r\n     * @throws Exception\r\n     */\r\n    public Buffer read() throws Exception;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/ProcessorOutputStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\r\n\r\n/**\r\n * Processor output stream\r\n */\r\npublic interface ProcessorOutputStream {\r\n    /**\r\n\t * Open the output stream\r\n\t * \r\n     * @throws Exception\r\n\t */\t\r\n    public void open() throws Exception;\r\n\r\n    /**\r\n     * Close from the output stream\r\n     */\r\n    public void close();\r\n    \r\n    /**\r\n     * Write to the stream without blocking\r\n     * \r\n     * @param buffer Input buffer\r\n     * @throws Exception\r\n     */\r\n    public void write(Buffer buffer) throws Exception;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/stream/RtpInputStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.stream;\r\n\r\n\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpPacketReceiver;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSession;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacket;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacketReceiver;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\n/**\n * RTP input stream\n *\n * @author jexa7410\n */\r\npublic class RtpInputStream implements ProcessorInputStream {\r\n    /**\r\n     * Local port\r\n     */\r\n    private int localPort;\r\n\r\n\t/**\r\n\t * RTP receiver\r\n\t */\r\n\tprivate RtpPacketReceiver rtpReceiver =  null;\r\n\r\n\t/**\r\n\t * RTCP receiver\r\n\t */\r\n\tprivate RtcpPacketReceiver rtcpReceiver =  null;\r\n\r\n    /**\r\n     * Input buffer\r\n     */\r\n\tprivate Buffer buffer = new Buffer();\r\n\r\n    /**\r\n     * Input format\r\n     */\r\n\tprivate Format inputFormat = null;\r\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession 
rtcpSession = null;\n\r\n\t/**\r\n\t * The logger\r\n\t */\r\n\tprivate final Logger logger = Logger.getLogger(this.getClass().getName());\n\n    /**\n     * Constructor\n     *\n     * @param localPort Local port\n     * @param inputFormat Input format\n     */\r\n    public RtpInputStream(int localPort, Format inputFormat) {\r\n\t\tthis.localPort = localPort;\r\n\t\tthis.inputFormat = inputFormat;\n\n        rtcpSession = new RtcpSession(false, 16000);\r\n    }\n\n    /**\n     * Open the input stream\n     *\n     * @throws Exception\n     */\r\n    public void open() throws Exception {\n\r\n    \t// Create the RTP receiver\r\n        rtpReceiver = new RtpPacketReceiver(localPort, rtcpSession);\r\n    \t// Create the RTCP receiver\r\n        rtcpReceiver = new RtcpPacketReceiver(localPort + 1, rtcpSession);\n        rtcpReceiver.start();\n    }\r\n\r\n    /**\r\n     * Close the input stream\r\n     */\r\n    public void close() {\r\n\t\ttry {\r\n\t\t\t// Close the RTP receiver\r\n\t\t\tif (rtpReceiver != null) {\r\n\t\t\t\trtpReceiver.close();\r\n\t\t\t}\r\n\r\n\t\t\t// Close the RTCP receiver\r\n\t\t\tif (rtcpReceiver != null) {\r\n\t\t\t\trtcpReceiver.close();\r\n\t\t\t}\r\n\t\t} catch(Exception e) {\r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.error(\"Can't close correctly RTP ressources\", e);\r\n\t\t\t}\r\n\t\t}\r\n\t}\n\n    /**\n     * Returns the RTP receiver\n     *\n     * @return RTP receiver\n     */\r\n    public RtpPacketReceiver getRtpReceiver() {\r\n    \treturn rtpReceiver;\r\n    }\r\n\n    /**\n     * Returns the RTCP receiver\n     *\n     * @return RTCP receiver\n     */\n    public RtcpPacketReceiver getRtcpReceiver() {\n        return rtcpReceiver;\n    }\n\r\n    /**\n     * Read from the input stream without blocking\n     *\n     * @return Buffer\n     * @throws Exception\n     */\r\n    public Buffer read() throws Exception {\r\n    \t// Wait and read a RTP packet\r\n    \tRtpPacket rtpPacket = 
rtpReceiver.readRtpPacket();\r\n    \tif (rtpPacket == null) {\r\n    \t\treturn null;\r\n    \t}\r\n\r\n    \t// Create a buffer\r\n        buffer.setData(rtpPacket.data);\r\n        buffer.setLength(rtpPacket.payloadlength);\r\n        buffer.setOffset(0);\r\n        buffer.setFormat(inputFormat);\r\n    \tbuffer.setSequenceNumber(rtpPacket.seqnum);\r\n    \tbuffer.setFlags(Buffer.FLAG_RTP_MARKER | Buffer.FLAG_RTP_TIME);\r\n    \tbuffer.setRTPMarker(rtpPacket.marker!=0);\r\n    \tbuffer.setTimeStamp(rtpPacket.timestamp);\r\n\r\n    \t// Set inputFormat back to null\r\n    \tinputFormat = null;\r\n    \treturn buffer;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/Buffer.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.util;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\r\n\r\n/**\r\n * Buffer\r\n * \r\n * @author jexa7410\r\n */\r\npublic class Buffer {\r\n\t/**\r\n\t * Indicates that this buffer marks the end of media for the\r\n\t * data stream\r\n\t */\r\n\tpublic final static int FLAG_EOM = (1 << 0);\r\n\r\n\t/**\r\n\t * Indicates that the media data should be ignored\r\n\t */\r\n\tpublic final static int FLAG_DISCARD = (1 << 1);\r\n\r\n\t/**\r\n\t * Indicates that the buffer carries a time stamp that's relative to\r\n\t * the SystemTimeBase. 
This flag is generally set for data transferred\r\n\t * from hardware capture source that uses the system clock.\r\n\t */\r\n\tpublic final static int FLAG_SYSTEM_TIME = (1 << 7);\r\n\r\n\t/**\r\n\t * This is a marker bit for RTP\r\n\t */\r\n\tpublic final static int FLAG_RTP_MARKER = (1 << 11);\r\n\r\n\t/**\r\n\t * Indicates that the buffer carries a time stamp that's in RTP (NTP)\r\n\t * time units\r\n\t */\r\n\tpublic final static int FLAG_RTP_TIME = (1 << 12);\r\n\r\n\t/**\r\n\t * Indicates that the data is arriving from a live (real-time) source.\r\n\t */\r\n\tpublic final static int FLAG_LIVE_DATA = (1 << 15);\r\n\r\n\t/**\r\n\t * Default value if the time stamp of the media is not known\r\n\t */\r\n\tpublic final static long TIME_UNKNOWN = -1L;\r\n\r\n\t/**\r\n\t * Default value if the sequence number is not known\r\n\t */\r\n\tpublic final static long SEQUENCE_UNKNOWN = Long.MAX_VALUE - 1;\r\n\r\n\t/**\r\n\t * The time stamp of the data in nanoseconds\r\n\t */\r\n\tprotected long timeStamp = TIME_UNKNOWN;\r\n\r\n\t/**\r\n\t * The format of the data chunk\r\n\t */\r\n\tprotected Format format = null;\r\n\r\n\t/**\r\n\t * States how many samples are valid in the array of data\r\n\t */\r\n\tprotected int length = 0;\r\n\r\n    /** \r\n     * Starting point (offset) into the array where the valid data begins\r\n     */ \r\n    protected int offset = 0;\r\n\r\n    /**\r\n\t * A flag mask that describes the boolean attributes of the buffer\r\n\t */\r\n\tprotected int flags = 0;\r\n\r\n    /**\r\n     * The duration of the data in the buffer in nanoseconds\r\n     */\r\n    protected long duration = TIME_UNKNOWN;    \r\n    \r\n\t/**\r\n\t * Media data chunk\r\n\t */\r\n\tprotected Object data = null;\r\n\r\n\t/**\r\n\t * The sequence number\r\n\t */\r\n\tprotected long sequenceNumber = SEQUENCE_UNKNOWN;\r\n\r\n\t/**\r\n\t * Get the data format\r\n\t * \r\n\t * @return Format\r\n\t */\r\n\tpublic Format getFormat() {\r\n\t\treturn format;\r\n\t}\r\n\r\n\t/**\r\n\t 
* Set the data format\r\n\t * \r\n\t * @param format New format\r\n\t */\r\n\tpublic void setFormat(Format format) {\r\n\t\tthis.format = format;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the flag mask\r\n\t * \r\n\t * @return Flag\r\n\t */\r\n\tpublic int getFlags() {\r\n\t\treturn flags;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the flag mask\r\n\t * \r\n\t * @param flags New flags\r\n\t */\r\n\tpublic void setFlags(int flags) {\r\n\t\tthis.flags = flags;\r\n\t}\r\n\r\n\t/**\r\n\t * Check if it's the end of the media stream\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isEOM() {\r\n\t\treturn (flags & FLAG_EOM) != 0;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the EOM flag\r\n\t * \r\n\t * @param eom EOM status flag\r\n\t */\r\n\tpublic void setEOM(boolean eom) {\r\n\t\tif (eom)\r\n\t\t\tflags |= FLAG_EOM;\r\n\t\telse\r\n\t\t\tflags &= ~FLAG_EOM;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Check if the RTP marker is set\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isRTPMarkerSet() {\r\n\t\treturn (flags & FLAG_RTP_MARKER) != 0;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the RTP marker\r\n\t * \r\n\t * @param marker RTP marker flag\r\n\t */\r\n\tpublic void setRTPMarker(boolean marker) {\r\n\t\tif (marker)\r\n\t\t\tflags |= FLAG_RTP_MARKER;\r\n\t\telse\r\n\t\t\tflags &= ~FLAG_RTP_MARKER;\r\n\t}\r\n\r\n\t/**\r\n\t * Check whether or not this buffer is to be discarded\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isDiscard() {\r\n\t\treturn (flags & FLAG_DISCARD) != 0;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the discard flag\r\n\t * \r\n\t * @param discard Discard flag.\r\n\t */\r\n\tpublic void setDiscard(boolean discard) {\r\n\t\tif (discard)\r\n\t\t\tflags |= FLAG_DISCARD;\r\n\t\telse\r\n\t\t\tflags &= ~FLAG_DISCARD;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the internal data that holds the media chunk\r\n\t * \r\n\t * @return Data\r\n\t */\r\n\tpublic Object getData() {\r\n\t\treturn data;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the internal data that holds the media chunk\r\n\t * \r\n\t * 
 @param data Data\r\n\t */\r\n\tpublic void setData(Object data) {\r\n\t\tthis.data = data;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the length of the valid data in the buffer\r\n\t * \r\n\t * @return The length of the valid data\r\n\t */\r\n\tpublic int getLength() {\r\n\t\treturn length;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the length of the valid data stored in the buffer\r\n\t * \r\n\t * @param length The length of the valid data\r\n\t */\r\n\tpublic void setLength(int length) {\r\n\t\tthis.length = length;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the offset into the data array where the valid data begins\r\n\t * \r\n\t * @return Offset\r\n\t */\r\n\tpublic int getOffset() {\r\n\t\treturn offset;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the offset\r\n\t * \r\n\t * @param offset The starting point for the valid data\r\n\t */\r\n\tpublic void setOffset(int offset) {\r\n\t\tthis.offset = offset;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the time stamp\r\n\t * \r\n\t * @return Time stamp in nanoseconds.\r\n\t */\r\n\tpublic long getTimeStamp() {\r\n\t\treturn timeStamp;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the time stamp\r\n\t * \r\n\t * @param timeStamp Time stamp in nanoseconds\r\n\t */\r\n\tpublic void setTimeStamp(long timeStamp) {\r\n\t\tthis.timeStamp = timeStamp;\r\n\t}\r\n\r\n\t/**\r\n\t * Get the duration\r\n\t * \r\n\t * @return Duration in nanoseconds\r\n\t */\r\n\tpublic long getDuration() {\r\n\t\treturn duration;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the duration\r\n\t * \r\n\t * @param duration Duration\r\n\t */\r\n\tpublic void setDuration(long duration) {\r\n\t\tthis.duration = duration;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the sequence number\r\n\t * \r\n\t * @param number Sequence number\r\n\t */\r\n\tpublic void setSequenceNumber(long number) {\r\n\t\tsequenceNumber = number;\r\n\t}\r\n\r\n\t/**\r\n\t * Gets the sequence number\r\n\t * \r\n\t * @return Sequence number\r\n\t */\r\n\tpublic long getSequenceNumber() {\r\n\t\treturn sequenceNumber;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/Packet.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.util;\r\n\r\n/**\r\n * Generic packet\r\n * \r\n * @author jexa7410\r\n */\r\npublic class Packet {\r\n\t/**\r\n\t * Data\r\n\t */\r\n\tpublic byte[] data;\r\n\t\r\n\t/**\r\n\t * Packet length\r\n\t */\r\n\tpublic int length;\r\n\r\n\t/**\r\n\t * Offset\r\n\t */\r\n\tpublic int offset;\r\n\t\t\r\n\t/**\r\n\t * Received at\r\n\t */\r\n\tpublic long receivedAt;\r\n\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic Packet() {\r\n\t}\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param packet Packet\r\n\t */\r\n\tpublic Packet(Packet packet) {\r\n\t\tdata = packet.data;\r\n\t\tlength = packet.length;\r\n\t\toffset = packet.offset;\r\n\t\treceivedAt = packet.receivedAt;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/core/ims/protocol/rtp/util/SystemTimeBase.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.core.ims.protocol.rtp.util;\r\n\r\n/**\r\n * Time base\r\n */\r\npublic class SystemTimeBase {\r\n\r\n\t/**\r\n\t * Offset time (start-up time)\r\n\t */\r\n\tprivate static long offset = System.currentTimeMillis() * 1000000L;\r\n\r\n\t/**\r\n\t * Returns a time base value in nanoseconds\r\n\t * \r\n\t * @return Time\r\n\t */\r\n\tpublic long getTime() {\r\n\t\treturn (System.currentTimeMillis() * 1000000L) - offset;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/AndroidFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform;\r\n\r\nimport android.content.Context;\r\n\r\n// import com.orangelabs.rcs.platform.file.FileFactory;\r\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\r\nimport com.orangelabs.rcs.platform.registry.RegistryFactory;\r\n\r\n/**\r\n * Android platform\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidFactory {\n\t/**\r\n\t * Android application context\r\n\t */\r\n\tprivate static Context context = null;\r\n\r\n\t/**\r\n\t * Returns the application context\r\n\t * \r\n\t * @return Context\r\n\t */\r\n\tpublic static Context getApplicationContext() {\r\n\t\treturn context;\r\n\t}\n\t\r\n\t/**\r\n\t * Load factory\r\n\t * \r\n\t * @param context Context\r\n\t */\r\n\tpublic static void setApplicationContext(Context context) {\r\n\t\t\n\t\tAndroidFactory.context = context;\r\n\t\ttry {\n\n\t\t\tNetworkFactory.loadFactory(\"com.orangelabs.rcs.platform.network.AndroidNetworkFactory\");\n\t\t\tRegistryFactory.loadFactory(\"com.orangelabs.rcs.platform.registry.AndroidRegistryFactory\");\n\t\t\t\n\t\t\t// 
FileFactory.loadFactory(\"com.orangelabs.rcs.platform.file.AndroidFileFactory\");\n\t\t\n\t\t} catch(FactoryException e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/FactoryException.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform;\r\n\r\n/**\r\n * Factory exception\r\n * \r\n * @author JM. Auffret\r\n */\r\npublic class FactoryException extends java.lang.Exception {\r\n\tstatic final long serialVersionUID = 1L;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t *\r\n\t * @param error Error message\r\n\t */\r\n\tpublic FactoryException(String error) {\r\n\t\tsuper(error);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/file/FileDescription.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.file;\r\n\r\n/**\r\n * File description\r\n * \r\n * @author jexa7410\r\n */\r\npublic class FileDescription {\r\n\t/**\r\n\t * Name\r\n\t */\r\n\tprivate String name;\r\n\t\r\n\t/**\r\n\t * Size\r\n\t */\r\n\tprivate long size = -1;\r\n\t\t\r\n\t/**\r\n\t * Directory\r\n\t */\t\r\n\tprivate boolean directory = false;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic FileDescription(String name, long size) {\r\n\t\tthis.name = name;\r\n\t\tthis.size = size;\r\n\t}\r\n\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic FileDescription(String name, long size, boolean directory) {\r\n\t\tthis.name = name;\r\n\t\tthis.size = size;\r\n\t\tthis.directory = directory;\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the size of the file\r\n\t * \r\n\t * @return File size\r\n\t */\r\n\tpublic long getSize() {\r\n\t\treturn size;\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the name of the file\r\n\t * \r\n\t * @return File name\r\n\t */\r\n\tpublic String getName() {\r\n\t\treturn name;\r\n\t}\r\n\r\n\t/**\r\n\t * Is a directory\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isDirectory() {\r\n\t\treturn 
directory;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/file/FileFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.file;\r\n\r\nimport com.orangelabs.rcs.platform.FactoryException;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.OutputStream;\n\r\n/**\r\n * File factory\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class FileFactory {\r\n\t/**\r\n\t * Current platform factory\r\n\t */\r\n\tprivate static FileFactory factory = null;\r\n\t\r\n\t/**\r\n\t * Load the factory\r\n\t * \r\n\t * @param classname Factory classname\r\n\t * @throws Exception\r\n\t */\r\n\tpublic static void loadFactory(String classname) throws FactoryException {\r\n\t\tif (factory != null) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t\t\r\n\t\ttry {\r\n\t\t\tfactory = (FileFactory)Class.forName(classname).newInstance();\r\n\t\t} catch(Exception e) {\r\n\t\t\tthrow new FactoryException(\"Can't load the factory \" + classname);\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the current factory\r\n\t * \r\n\t * @return Factory\r\n\t */\r\n\tpublic static FileFactory getFactory() {\r\n\t\treturn factory;\r\n\t}\r\n\r\n\t/**\r\n\t * Open a file input stream\r\n\t * \r\n\t * @param url URL\r\n\t * @return 
Input stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic abstract InputStream openFileInputStream(String url) throws IOException;\r\n\r\n\t/**\r\n\t * Open a file output stream\r\n\t * \r\n\t * @param url URL\r\n\t * @return Output stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic abstract OutputStream openFileOutputStream(String url) throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the description of a file\r\n\t * \r\n\t * @param url URL of the file\r\n\t * @return File description\r\n\t * @throws IOException\r\n\t */\r\n\tpublic abstract FileDescription getFileDescription(String url) throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the root directory for photos\r\n\t * \r\n\t *  @return Directory path\r\n\t */\r\n\tpublic abstract String getPhotoRootDirectory();\r\n\r\n\t/**\r\n\t * Returns the root directory for videos\r\n\t * \r\n\t *  @return Directory path\r\n\t */\r\n\tpublic abstract String getVideoRootDirectory();\r\n\t\r\n\t/**\r\n\t * Returns the root directory for files\r\n\t * \r\n\t *  @return Directory path\r\n\t */\r\n\tpublic abstract String getFileRootDirectory();\r\n\t\r\n\t/**\r\n\t * Update the media storage\r\n\t * \r\n\t * @param url New URL to be added\r\n\t */\r\n\tpublic abstract void updateMediaStorage(String url);\t\r\n\t\r\n\t/**\r\n\t * Returns whether a file exists or not\r\n\t * \r\n\t * @param url Url of the file to check\r\n\t * @return File existence\r\n\t */\r\n\tpublic abstract boolean fileExists(String url);\n\t\n\t/**\n\t * Create a directory if not already exist\n\t * \n\t * @param path Directory path\n\t * @return true if the directory exists or is created\n\t */\n\tpublic static boolean createDirectory(String path) {\n\t\tFile dir = new File(path); \n\t\tif (!dir.exists()) {\n\t\t\tif (!dir.mkdirs()) {\n                return false; \n\t\t\t}\n\t\t}\n        return true;\n\t}\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/logger/AndroidAppender.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.logger;\r\n\r\nimport android.util.Log;\r\n\r\nimport com.orangelabs.rcs.utils.logger.Appender;\r\nimport com.orangelabs.rcs.utils.logger.Logger;\r\n\r\n/**\r\n * Android appender \r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidAppender extends Appender {\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic AndroidAppender() {\r\n\t\tsuper();\r\n\t}\r\n\r\n\t/**\r\n\t * Print a trace\r\n\t *\r\n\t * @param classname Classname\r\n\t * @param level Trace level\r\n\t * @param trace Trace\r\n\t */\r\n\tpublic synchronized void printTrace(String classname, int level, String trace) {\r\n\t\tclassname = \"[RCS][\" + classname + \"]\";\r\n\t\t\r\n\t\tif (level == Logger.INFO_LEVEL) {\r\n\t\t\tLog.i(classname, trace);\r\n\t\t} else\r\n\t\tif (level == Logger.WARN_LEVEL) {\r\n\t\t\tLog.w(classname, trace);\r\n\t\t} else\r\n\t\tif (level == Logger.ERROR_LEVEL) {\r\n\t\t\tLog.e(classname, trace);\r\n\t\t} else\r\n\t\tif (level == Logger.FATAL_LEVEL) {\r\n\t\t\tLog.e(classname, trace);\r\n\t\t} else {\r\n\t\t\tLog.v(classname, trace);\r\n\t\t}\r\n\t }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidDatagramConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\nimport java.net.DatagramPacket;\r\nimport java.net.DatagramSocket;\r\nimport java.net.InetAddress;\r\n\r\n\r\n/**\r\n * Android datagram server connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidDatagramConnection implements DatagramConnection {\r\n\t/**\r\n\t * Datagram connection\r\n\t */\r\n\tprivate DatagramSocket connection = null; \r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic AndroidDatagramConnection() {\r\n\t}\r\n\r\n\t/**\r\n\t * Open the datagram connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open() throws IOException {\r\n\t\tconnection = new DatagramSocket();\r\n\t}\r\n\r\n\t/**\r\n\t * Open the datagram connection\r\n\t * \r\n\t * @param port Local port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(int port) throws IOException {\r\n\t\tconnection = new DatagramSocket(port);\r\n\t}\r\n\r\n\t/**\r\n\t * Close the datagram connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException {\r\n\t\tif (connection != null) 
{\r\n\t\t\tconnection.close();\r\n\t\t\tconnection = null;\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Receive data with a specific buffer size\r\n\t * \r\n\t * @param bufferSize Buffer size \r\n\t * @return Byte array\r\n\t * @throws IOException\r\n\t */\r\n\tpublic byte[] receive(int bufferSize) throws IOException {\r\n\t\tif (connection != null) {\r\n\t\t\tbyte[] buf = new byte[bufferSize];\r\n\t\t\tDatagramPacket packet = new DatagramPacket(buf, buf.length);\r\n\t\t\tconnection.receive(packet);\r\n\t\t\t\r\n\t\t\tint packetLength = packet.getLength();\r\n\t        byte[] bytes =  packet.getData();\r\n\t        byte[] data = new byte[packetLength];\r\n\t        System.arraycopy(bytes, 0, data, 0, packetLength);\r\n\t\t\treturn data;\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Receive data\r\n\t * \r\n\t * @return Byte array\r\n\t * @throws IOException\r\n\t */\r\n\tpublic byte[] receive() throws IOException {\r\n\t\treturn receive(DatagramConnection.DEFAULT_DATAGRAM_SIZE);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Send data\r\n\t * \r\n\t * @param remoteAddr Remote address\r\n\t * @param remotePort Remote port\r\n\t * @param data Data as byte array\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void send(String remoteAddr, int remotePort, byte[] data) throws IOException {\r\n\t\tif (data == null) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t\t\r\n\t\tif (connection != null) {\r\n\t\t\tInetAddress address = InetAddress.getByName(remoteAddr);\r\n\t\t\tDatagramPacket packet = new DatagramPacket(data, data.length, address, remotePort);\r\n\t\t\tconnection.send(packet);\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the local address\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic String getLocalAddress() throws IOException {\r\n\t\tif (connection != null) {\r\n\t\t\treturn 
connection.getLocalAddress().getHostAddress();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the local port\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getLocalPort() throws IOException {\r\n\t\tif (connection != null) {\r\n\t\t\treturn connection.getLocalPort();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidHttpConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.ByteArrayOutputStream;\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.net.HttpURLConnection;\r\nimport java.net.URL;\r\n\r\n\r\n/**\r\n * Android HTTP connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidHttpConnection implements HttpConnection {\r\n\t/**\r\n\t * HTTP connection\r\n\t */\r\n\tprivate HttpURLConnection connection = null;\r\n\t\r\n\t/**\r\n\t * Open the HTTP connection\r\n\t * \r\n\t * @param url Remote URL\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(String url) throws IOException {\r\n\t\tURL urlConn = new URL(url);\r\n\t\tconnection = (HttpURLConnection)urlConn.openConnection();\r\n\t\tconnection.connect();\r\n\t}\r\n\r\n\t/**\r\n\t * Close the HTTP connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException {\r\n\t\tif (connection != null) {\r\n\t\t\tconnection.disconnect();\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * HTTP GET request\r\n\t * \r\n\t * @return Response\r\n\t * @throws IOException\r\n\t */\r\n\tpublic ByteArrayOutputStream get() throws IOException 
{\r\n\t\tif (connection != null) {\r\n\t\t\treturn sendHttpRequest(HttpConnection.GET_METHOD);\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * HTTP POST request\r\n\t * \r\n\t * @return Response\r\n\t * @throws IOException\r\n\t */\r\n\tpublic ByteArrayOutputStream post() throws IOException {\r\n\t\tif (connection != null) {\r\n\t\t\treturn sendHttpRequest(HttpConnection.POST_METHOD);\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Send HTTP request\r\n\t * \r\n\t * @param method HTTP method\r\n\t * @return Response\r\n\t * @throws IOException\r\n\t */\r\n\tprivate ByteArrayOutputStream sendHttpRequest(String method) throws IOException {\r\n        connection.setRequestMethod(method);\r\n        int rc = connection.getResponseCode();\r\n        if (rc != HttpURLConnection.HTTP_OK) {\r\n            throw new IOException(\"HTTP error \" + rc);\r\n        }\r\n        \r\n        InputStream inputStream = connection.getInputStream();\r\n        ByteArrayOutputStream result = new ByteArrayOutputStream();\r\n    \tint ch;\r\n    \twhile((ch = inputStream.read()) != -1) {\r\n    \t\tresult.write(ch);\r\n    \t}\r\n    \tinputStream.close();\r\n    \t\r\n        return result;\t\t\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidNetworkFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.net.InetAddress;\nimport java.net.NetworkInterface;\nimport java.util.Enumeration;\n\n/**\n * Android network factory\n * \n * @author jexa7410\n */\r\npublic class AndroidNetworkFactory extends NetworkFactory {\r\n\r\n\t/**\r\n\t * Returns the local IP address\r\n\t *\r\n\t * @return IP address\r\n\t */\r\n\tpublic String getLocalIpAddress() {\r\n\t\ttry {\r\n\t        for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) {\r\n\t            NetworkInterface intf = (NetworkInterface)en.nextElement();\r\n\t            for (Enumeration<InetAddress> addr = intf.getInetAddresses(); addr.hasMoreElements();) {\r\n\t                InetAddress inetAddress = (InetAddress)addr.nextElement();\n                    if (!inetAddress.isLoopbackAddress() && !inetAddress.isLinkLocalAddress()) {\n                        return inetAddress.getHostAddress().toString();\n                    }\r\n\t            }\r\n\t        }\r\n\t        return null;\r\n\t\t} catch(Exception e) {\r\n\t\t\treturn null;\r\n\t\t}\r\n\t}\n\n    /**\n     * Create a 
datagram connection\n     * \n     * @return Datagram connection\n     */\r\n\tpublic DatagramConnection createDatagramConnection() {\r\n\t\treturn new AndroidDatagramConnection();\r\n\t}\n\n    /**\n     * Create a socket client connection\n     * \n     * @return Socket connection\n     */\r\n\tpublic SocketConnection createSocketClientConnection() {\r\n\t\treturn new AndroidSocketConnection();\r\n\t}\n\n    /**\n     * Create a socket server connection\n     * \n     * @return Socket server connection\n     */\r\n\tpublic SocketServerConnection createSocketServerConnection() {\r\n\t\treturn new AndroidSocketServerConnection();\r\n\t}\n\n    /**\n     * Create an HTTP connection\n     * \n     * @return HTTP connection\n     */\r\n\tpublic HttpConnection createHttpConnection() {\r\n\t\treturn new AndroidHttpConnection();\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidSocketConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.io.OutputStream;\r\nimport java.net.Socket;\r\n\r\n\r\n/**\r\n * Android socket connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidSocketConnection implements SocketConnection {\r\n\t/**\r\n\t * Socket connection\r\n\t */\r\n\tprivate Socket socket = null;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic AndroidSocketConnection() {\r\n\t}\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param socket Socket\r\n\t */\r\n\tpublic AndroidSocketConnection(Socket socket) {\r\n\t\tthis.socket = socket;\r\n\t}\r\n\r\n\t/**\r\n\t * Open the socket\r\n\t * \r\n\t * @param remoteAddr Remote address\r\n\t * @param remotePort Remote port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(String remoteAddr, int remotePort) throws IOException {\r\n\t\tsocket = new Socket(remoteAddr, remotePort);\r\n\t}\r\n\r\n\t/**\r\n\t * Close the socket\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException {\r\n\t\tif (socket != null) 
{\r\n\t\t\tsocket.close();\r\n\t\t\tsocket = null;\r\n\t\t}\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the socket input stream\r\n\t * \r\n\t * @return Input stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic InputStream getInputStream() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getInputStream();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the socket output stream\r\n\t * \r\n\t * @return Output stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic OutputStream getOutputStream() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getOutputStream();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the remote address of the connection\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic String getRemoteAddress() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getInetAddress().getHostAddress();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the remote port of the connection\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getRemotePort() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getPort();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the local address of the connection\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic String getLocalAddress() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getLocalAddress().getHostAddress();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the local port of the connection\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t 
*/\r\n\tpublic int getLocalPort() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getLocalPort();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Get the timeout for this socket during which a reading\r\n\t * operation shall block while waiting for data\r\n\t * \r\n\t * @return Timeout in milliseconds\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getSoTimeout() throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\treturn socket.getSoTimeout();\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n\r\n\t/**\r\n\t * Set the timeout for this socket during which a reading\r\n\t * operation shall block while waiting for data\r\n\t * \r\n\t * @param timeout Timeout in milliseconds\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void setSoTimeout(int timeout) throws IOException {\r\n\t\tif (socket != null) {\r\n\t\t\tsocket.setSoTimeout(timeout);\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/AndroidSocketServerConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\nimport java.net.ServerSocket;\r\nimport java.net.Socket;\r\n\r\nimport com.orangelabs.rcs.utils.logger.Logger;\r\n\r\n\r\n/**\r\n * Android socket connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidSocketServerConnection implements SocketServerConnection {\r\n\t/**\r\n\t * Socket server connection\r\n\t */\r\n\tprivate ServerSocket acceptSocket = null; \r\n\r\n\t/**\r\n     * The logger\r\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n    /**\r\n\t * Constructor\r\n\t */\r\n\tpublic AndroidSocketServerConnection() {\r\n\t}\r\n\r\n\t/**\r\n\t * Open the socket\r\n\t * \r\n\t * @param port Local port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(int port) throws IOException {\r\n\t\tacceptSocket = new ServerSocket(port);\r\n\t}\r\n\r\n\t/**\r\n\t * Close the socket\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException {\r\n\t\tif (acceptSocket != null) {\r\n\t\t\tacceptSocket.close();\r\n\t\t\tacceptSocket = 
null;\t\t\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Accept connection\r\n\t * \r\n\t * @return Socket connection\r\n\t * @throws IOException\r\n\t */\r\n\tpublic SocketConnection acceptConnection() throws IOException {\r\n\t\tif (acceptSocket != null) { \r\n\t\t\tif (logger.isActivated()) {\r\n\t\t\t\tlogger.debug(\"Socket serverSocket is waiting for incoming connection\");\r\n\t\t\t}\r\n\t\t\tSocket socket = acceptSocket.accept();\t\t\r\n\t\t\treturn new AndroidSocketConnection(socket);\r\n\t\t} else {\r\n\t\t\tthrow new IOException(\"Connection not openned\");\r\n\t\t}\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/DatagramConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\n\r\n/**\r\n * Datagram connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface DatagramConnection {\r\n\t/**\r\n\t * Default datagram packet size\r\n\t */\r\n\tpublic static int DEFAULT_DATAGRAM_SIZE = 4096 * 8;\r\n\t\r\n\t/**\r\n\t * Open the datagram connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open() throws IOException;\r\n\t\r\n\t/**\r\n\t * Open the datagram connection\r\n\t * \r\n\t * @param port Local port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(int port) throws IOException;\r\n\r\n\t/**\r\n\t * Close the datagram connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException;\r\n\t\r\n\t/**\r\n\t * Send data\r\n\t * \r\n\t * @param remoteAddr Remote address\r\n\t * @param remotePort Remote port\r\n\t * @param data Data as byte array\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void send(String remoteAddr, int remotePort, byte[] data) throws IOException;\r\n\t\r\n\t/**\r\n\t * Receive data\r\n\t * \r\n\t * @return Byte array\r\n\t * @throws 
IOException\r\n\t */\r\n\tpublic byte[] receive() throws IOException;\r\n\t\r\n\t/**\r\n\t * Receive data with a specific buffer size\r\n\t * \r\n\t * @param bufferSize Buffer size \r\n\t * @return Byte array\r\n\t * @throws IOException\r\n\t */\r\n\tpublic byte[] receive(int bufferSize) throws IOException;\t\r\n\t\r\n\t/**\r\n\t * Returns the local address\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic String getLocalAddress() throws IOException;\r\n\r\n\t/**\r\n\t * Returns the local port\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getLocalPort() throws IOException;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/HttpConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.ByteArrayOutputStream;\r\nimport java.io.IOException;\r\n\r\n/**\r\n * HTTP connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface HttpConnection {\r\n\t/**\r\n\t * GET method\r\n\t */\r\n\tpublic final static String GET_METHOD = \"GET\";\r\n\t\r\n\t/**\r\n\t * POST method\r\n\t */\r\n\tpublic final static String POST_METHOD = \"POST\";\r\n\r\n\t/**\r\n\t * Open the HTTP connection\r\n\t * \r\n\t * @param url Remote URL\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(String url) throws IOException;\r\n\r\n\t/**\r\n\t * Close the HTTP connection\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException;\r\n\t\r\n\t/**\r\n\t * HTTP GET request\r\n\t * \r\n\t * @return Response\r\n\t * @throws IOException\r\n\t */\r\n\tpublic ByteArrayOutputStream get() throws IOException;\r\n\t\r\n\t/**\r\n\t * HTTP POST request\r\n\t * \r\n\t * @return Response\r\n\t * @throws IOException\r\n\t */\r\n\tpublic ByteArrayOutputStream post() throws IOException;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/NetworkFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport com.orangelabs.rcs.platform.FactoryException;\r\n\r\n/**\r\n * Network factory \r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class NetworkFactory {\r\n\t/**\r\n\t * Current platform factory\r\n\t */\r\n\tprivate static NetworkFactory factory = null;\r\n\t\r\n\t/**\r\n\t * Load the factory\r\n\t * \r\n\t * @param classname Factory classname\r\n\t * @throws Exception\r\n\t */\r\n\tpublic static void loadFactory(String classname) throws FactoryException {\r\n\t\tif (factory != null) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t\t\r\n\t\ttry {\r\n\t\t\tfactory = (NetworkFactory)Class.forName(classname).newInstance();\r\n\t\t} catch(Exception e) {\r\n\t\t\tthrow new FactoryException(\"Can't load the factory \" + classname);\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the current factory\r\n\t * \r\n\t * @return Factory\r\n\t */\r\n\tpublic static NetworkFactory getFactory() {\r\n\t\treturn factory;\r\n\t}\r\n\r\n\t/**\r\n\t * Returns the local IP address\r\n\t * \r\n\t * @return Address\r\n\t */\r\n\tpublic abstract String getLocalIpAddress();\r\n\t\r\n\t/**\r\n\t * Create a datagram 
connection\r\n\t * \r\n\t * @return Datagram connection\r\n\t */\r\n\tpublic abstract DatagramConnection createDatagramConnection();\r\n\r\n\t/**\r\n\t * Create a socket client connection\r\n\t * \r\n\t * @return Socket connection\r\n\t */\r\n\tpublic abstract SocketConnection createSocketClientConnection();\r\n\r\n\t/**\r\n\t * Create a socket server connection\r\n\t * \r\n\t * @return Socket server connection\r\n\t */\r\n\tpublic abstract SocketServerConnection createSocketServerConnection();\r\n\t\r\n\t/**\r\n\t * Create an HTTP connection\r\n\t * \r\n\t * @return HTTP connection\r\n\t */\r\n\tpublic abstract HttpConnection createHttpConnection();\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/SocketConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.io.OutputStream;\r\n\r\n/**\r\n * Socket client connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface SocketConnection {\r\n\t/**\r\n\t * Open the socket\r\n\t * \r\n\t * @param remoteAddr Remote address\r\n\t * @param remotePort Remote port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(String remoteAddr, int remotePort) throws IOException;\r\n\r\n\t/**\r\n\t * Close the socket\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the socket input stream\r\n\t * \r\n\t * @return Input stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic InputStream getInputStream() throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the socket output stream\r\n\t * \r\n\t * @return Output stream\r\n\t * @throws IOException\r\n\t */\r\n\tpublic OutputStream getOutputStream() throws IOException;\r\n\r\n\t/**\r\n\t * Returns the remote address of the connection\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic 
String getRemoteAddress() throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the remote port of the connection\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getRemotePort() throws IOException;\r\n\r\n\t/**\r\n\t * Returns the local address of the connection\r\n\t * \r\n\t * @return Address\r\n\t * @throws IOException\r\n\t */\r\n\tpublic String getLocalAddress() throws IOException;\r\n\t\r\n\t/**\r\n\t * Returns the local port of the connection\r\n\t * \r\n\t * @return Port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getLocalPort() throws IOException;\r\n\t\r\n\t/**\r\n\t * Get the timeout for this socket during which a reading\r\n\t * operation shall block while waiting for data\r\n\t * \r\n\t * @return Milliseconds\r\n\t * @throws IOException\r\n\t */\r\n\tpublic int getSoTimeout() throws IOException;\r\n\r\n\t/**\r\n\t * Set the timeout for this socket during which a reading\r\n\t * operation shall block while waiting for data\r\n\t * \r\n\t * @param timeout Timeout in milliseconds\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void setSoTimeout(int timeout) throws IOException;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/network/SocketServerConnection.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.network;\r\n\r\nimport java.io.IOException;\r\n\r\n/**\r\n * Socket server connection\r\n * \r\n * @author jexa7410\r\n */\r\npublic interface SocketServerConnection {\r\n\t/**\r\n\t * Open the socket\r\n\t * \r\n\t * @param port Local port\r\n\t * @throws IOException\r\n\t */\r\n\tpublic void open(int port) throws IOException;\r\n\r\n\t/**\r\n\t * Close the socket\r\n\t * \r\n\t * @throws IOException\r\n\t */\r\n\tpublic void close() throws IOException;\r\n\t\r\n\t/**\r\n\t * Accept connection\r\n\t * \r\n\t * @return Socket connection\r\n\t * @throws IOException\r\n\t */\r\n\tpublic SocketConnection acceptConnection() throws IOException;\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/registry/AndroidRegistryFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.registry;\r\n\r\nimport com.orangelabs.rcs.core.CoreException;\nimport com.orangelabs.rcs.platform.AndroidFactory;\n\nimport android.app.Activity;\nimport android.content.SharedPreferences;\n\r\n/**\r\n * Android registry factory\r\n * \r\n * @author jexa7410\r\n */\r\npublic class AndroidRegistryFactory extends RegistryFactory {\r\n\t/**\r\n\t * RCS registry name\r\n\t */\r\n\tpublic static final String RCS_PREFS = \"RCS\";\r\n\r\n\t/**\r\n\t * Shared preference\r\n\t */\r\n\tprivate SharedPreferences preferences;\r\n\r\n\t/**\r\n     * Constructor\r\n     * \r\n     * @throws CoreException\r\n     */\r\n\tpublic AndroidRegistryFactory() throws CoreException {\r\n\t\tsuper();\r\n\r\n\t\tif (AndroidFactory.getApplicationContext() == null) {\r\n\t\t\tthrow new CoreException(\"Application context not initialized\");\r\n\t\t}\r\n\t\t\r\n\t\tpreferences = AndroidFactory.getApplicationContext().getSharedPreferences(RCS_PREFS, Activity.MODE_PRIVATE);\r\n\t}\r\n\r\n\t/**\r\n\t * Read a string value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * 
@return String\r\n\t */\r\n\tpublic String readString(String key, String defaultValue) {\r\n\t\treturn preferences.getString(key, defaultValue);\r\n\t}\r\n\r\n\t/**\r\n\t * Write a string value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic void writeString(String key, String value) {\r\n\t\tSharedPreferences.Editor editor = preferences.edit();\r\n\t\teditor.putString(key, value);\r\n\t\teditor.commit();\r\n\t}\r\n\r\n\t/**\r\n\t * Read an integer value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Integer\r\n\t */\r\n\tpublic int readInteger(String key, int defaultValue) {\r\n\t\treturn preferences.getInt(key, defaultValue);\r\n\t}\r\n\r\n\t/**\r\n\t * Write an integer value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic void writeInteger(String key, int value) {\r\n\t\tSharedPreferences.Editor editor = preferences.edit();\r\n\t\teditor.putInt(key, value);\r\n\t\teditor.commit();\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * Read a long value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Long\r\n\t */\r\n\tpublic long readLong(String key, long defaultValue) {\r\n\t\treturn preferences.getLong(key, defaultValue);\r\n\t}\r\n\r\n\t/**\r\n\t * Write a long value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic void writeLong(String key, long value) {\r\n\t\tSharedPreferences.Editor editor = preferences.edit();\r\n\t\teditor.putLong(key, value);\r\n\t\teditor.commit();\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Read a boolean value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean readBoolean(String key, boolean defaultValue) 
{\r\n\t\treturn preferences.getBoolean(key, defaultValue);\r\n\t}\r\n\r\n\t/**\r\n\t * Write a boolean value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic void writeBoolean(String key, boolean value) {\r\n\t\tSharedPreferences.Editor editor = preferences.edit();\r\n\t\teditor.putBoolean(key, value);\r\n\t\teditor.commit();\r\n\t}\t\r\n\r\n\t/**\r\n\t * Remove a parameter in the registry\r\n\t * \r\n\t * @param key Key name to be removed\r\n\t */\r\n\tpublic void removeParameter(String key) {\r\n\t\tSharedPreferences.Editor editor = preferences.edit();\r\n\t\teditor.remove(key);\r\n\t\teditor.commit();\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/platform/registry/RegistryFactory.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.platform.registry;\r\n\r\nimport com.orangelabs.rcs.platform.FactoryException;\r\n\r\n/**\r\n * Application registry factory\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class RegistryFactory {\r\n\t/**\r\n\t * Current platform factory\r\n\t */\r\n\tprivate static RegistryFactory factory = null;\r\n\t\r\n\t/**\r\n\t * Load the factory\r\n\t * \r\n\t * @param classname Factory classname\r\n\t * @throws Exception\r\n\t */\r\n\tpublic static void loadFactory(String classname) throws FactoryException {\r\n\t\tif (factory != null) {\r\n\t\t\treturn;\r\n\t\t}\r\n\t\t\r\n\t\ttry {\r\n\t\t\tfactory = (RegistryFactory)Class.forName(classname).newInstance();\r\n\t\t} catch(Exception e) {\r\n\t\t\tthrow new FactoryException(\"Can't load the factory \" + classname);\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns the current factory\r\n\t * \r\n\t * @return Factory\r\n\t */\r\n\tpublic static RegistryFactory getFactory() {\r\n\t\treturn factory;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Read a string value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return 
String\r\n\t */\r\n\tpublic abstract String readString(String key, String defaultValue);\r\n\r\n\t/**\r\n\t * Write a string value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic abstract void writeString(String key, String value);\r\n\r\n\t/**\r\n\t * Read an integer value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Integer\r\n\t */\r\n\tpublic abstract int readInteger(String key, int defaultValue);\r\n\r\n\t/**\r\n\t * Write an integer value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic abstract void writeInteger(String key, int value);\r\n\r\n\t/**\r\n\t * Read a long value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Long\r\n\t */\r\n\tpublic abstract long readLong(String key, long defaultValue);\r\n\r\n\t/**\r\n\t * Write a long value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic abstract void writeLong(String key, long value);\r\n\r\n\t/**\r\n\t * Read a boolean value in the registry\r\n\t * \r\n\t * @param key Key name to be read\r\n\t * @param defaultValue Default value\r\n\t * @return Boolean\r\n\t */\r\n\tpublic abstract boolean readBoolean(String key, boolean defaultValue);\r\n\r\n\t/**\r\n\t * Write a boolean value in the registry\r\n\t * \r\n\t * @param key Key name to be updated\r\n\t * @param value New value\r\n\t */\r\n\tpublic abstract void writeBoolean(String key, boolean value);\r\n\t\r\n\t/**\r\n\t * Remove a parameter in the registry\r\n\t * \r\n\t * @param key Key name to be removed\r\n\t */\r\n\tpublic abstract void removeParameter(String key);\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/provider/settings/RcsSettings.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.provider.settings;\r\n\r\nimport com.orangelabs.rcs.service.api.client.capability.Capabilities;\n\nimport android.content.ContentResolver;\nimport android.content.ContentValues;\nimport android.content.Context;\nimport android.database.Cursor;\nimport android.net.Uri;\n\n/**\n * RCS settings\n *\n * @author jexa7410\n */\r\npublic class RcsSettings {\r\n\t/**\r\n\t * Current instance\r\n\t */\r\n\tprivate static RcsSettings instance = null;\r\n\r\n\t/**\r\n\t * Content resolver\r\n\t */\r\n\tprivate ContentResolver cr;\r\n\r\n\t/**\r\n\t * Database URI\r\n\t */\r\n\tprivate Uri databaseUri = RcsSettingsData.CONTENT_URI;\r\n\n    /**\n     * Create instance\n     *\n     * @param ctx Context\n     */\r\n\tpublic static synchronized void createInstance(Context ctx) {\r\n\t\tif (instance == null) {\r\n\t\t\tinstance = new RcsSettings(ctx);\r\n\t\t}\r\n\t}\r\n\r\n\t/**\n     * Returns instance\n     *\n     * @return Instance\n     */\r\n\tpublic static RcsSettings getInstance() {\r\n\t\treturn instance;\r\n\t}\r\n\r\n\t/**\n     * Constructor\n     *\n     * @param ctx Application context\n     */\r\n\tprivate 
RcsSettings(Context ctx) {\r\n\t\tsuper();\r\n\r\n        this.cr = ctx.getContentResolver();\r\n\t}\r\n\r\n\t/**\n     * Read a parameter\n     *\n     * @param key Key\n     * @return Value\n     */\r\n\tpublic String readParameter(String key) {\r\n\t\tString result = null;\r\n        Cursor c = cr.query(databaseUri, null, RcsSettingsData.KEY_KEY + \"='\" + key + \"'\", null, null);\r\n        if ((c != null) && (c.getCount() > 0)) {\r\n\t        if (c.moveToFirst()) {\r\n\t        \tresult = c.getString(2);\r\n\t        }\r\n\t        c.close();\r\n        }\r\n        return result;\r\n\t}\r\n\r\n\t/**\n     * Write a parameter\n     *\n     * @param key Key\n     * @param value Value\n     */\r\n\tpublic void writeParameter(String key, String value) {\r\n        ContentValues values = new ContentValues();\r\n        values.put(RcsSettingsData.KEY_VALUE, value);\r\n        String where = RcsSettingsData.KEY_KEY + \"='\" + key + \"'\";\r\n        cr.update(databaseUri, values, where, null);\r\n\t}\r\n\n\t/**\n     * Is RCS service activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isServiceActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.SERVICE_ACTIVATED));\r\n\t\t}\r\n\t\treturn result;\r\n    }\n\r\n\t/**\n     * Set the RCS service activation state\n     *\n     * @param state State\n     */\r\n\tpublic void setServiceActivationState(boolean state) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.SERVICE_ACTIVATED, Boolean.toString(state));\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Is RCS service authorized in roaming\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isRoamingAuthorized() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.ROAMING_AUTHORIZED));\r\n\t\t}\r\n\t\treturn result;\r\n    }\n\r\n\t/**\n     * Set the roaming 
authorization state\n     *\n     * @param state State\n     */\r\n\tpublic void setRoamingAuthorizationState(boolean state) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.ROAMING_AUTHORIZED, Boolean.toString(state));\r\n\t\t}\r\n\t}\r\n\r\n\t/**\n     * Get the ringtone for presence invitation\n     *\n     * @return Ringtone URI or null if there is no ringtone\n     */\r\n\tpublic String getPresenceInvitationRingtone() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.PRESENCE_INVITATION_RINGTONE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the presence invitation ringtone\n     *\n     * @param uri Ringtone URI\n     */\r\n\tpublic void setPresenceInvitationRingtone(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.PRESENCE_INVITATION_RINGTONE, uri);\r\n\t\t}\r\n\t}\n\n    /**\n     * Is phone vibrate for presence invitation\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPhoneVibrateForPresenceInvitation() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.PRESENCE_INVITATION_VIBRATE));\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set phone vibrate for presence invitation\n     *\n     * @param vibrate Vibrate state\n     */\r\n\tpublic void setPhoneVibrateForPresenceInvitation(boolean vibrate) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.PRESENCE_INVITATION_VIBRATE, Boolean.toString(vibrate));\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Get the ringtone for CSh invitation\n     *\n     * @return Ringtone URI or null if there is no ringtone\n     */\r\n\tpublic String getCShInvitationRingtone() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.CSH_INVITATION_RINGTONE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set 
the CSh invitation ringtone\n     *\n     * @param uri Ringtone URI\n     */\r\n\tpublic void setCShInvitationRingtone(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CSH_INVITATION_RINGTONE, uri);\r\n\t\t}\r\n\t}\n\n    /**\n     * Is phone vibrate for CSh invitation\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPhoneVibrateForCShInvitation() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CSH_INVITATION_VIBRATE));\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set phone vibrate for CSh invitation\n     *\n     * @param vibrate Vibrate state\n     */\r\n\tpublic void setPhoneVibrateForCShInvitation(boolean vibrate) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CSH_INVITATION_VIBRATE, Boolean.toString(vibrate));\r\n\t\t}\r\n    }\r\n\r\n\t/**\n     * Is phone beep if the CSh available\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPhoneBeepIfCShAvailable() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CSH_AVAILABLE_BEEP));\r\n\t\t}\r\n\t\treturn result;\r\n    }\n\r\n\t/**\n     * Set phone beep if CSh available\n     *\n     * @param beep Beep state\n     */\r\n\tpublic void setPhoneBeepIfCShAvailable(boolean beep) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CSH_AVAILABLE_BEEP, Boolean.toString(beep));\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Get the CSh video format\n     *\n     * @return Video format as string\n     */\r\n\tpublic String getCShVideoFormat() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.CSH_VIDEO_FORMAT);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the CSh video format\n     *\n     * @param fmt Video format\n     */\r\n\tpublic void setCShVideoFormat(String 
fmt) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CSH_VIDEO_FORMAT, fmt);\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Get the CSh video size\n     *\n     * @return Size (e.g. QCIF, QVGA)\n     */\r\n\tpublic String getCShVideoSize() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.CSH_VIDEO_SIZE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the CSh video size\n     *\n     * @param size Video size\n     */\r\n\tpublic void setCShVideoSize(String size) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CSH_VIDEO_SIZE, size);\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Get the ringtone for file transfer invitation\n     *\n     * @return Ringtone URI or null if there is no ringtone\n     */\r\n\tpublic String getFileTransferInvitationRingtone() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.FILETRANSFER_INVITATION_RINGTONE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the file transfer invitation ringtone\n     *\n     * @param uri Ringtone URI\n     */\r\n\tpublic void setFileTransferInvitationRingtone(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.FILETRANSFER_INVITATION_RINGTONE, uri);\r\n\t\t}\r\n\t}\n\n    /**\n     * Is phone vibrate for file transfer invitation\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPhoneVibrateForFileTransferInvitation() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.FILETRANSFER_INVITATION_VIBRATE));\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set phone vibrate for file transfer invitation\n     *\n     * @param vibrate Vibrate state\n     */\r\n\tpublic void setPhoneVibrateForFileTransferInvitation(boolean vibrate) {\r\n\t\tif (instance != null) 
{\r\n\t\t\twriteParameter(RcsSettingsData.FILETRANSFER_INVITATION_VIBRATE, Boolean.toString(vibrate));\r\n\t\t}\r\n    }\n\r\n\t/**\n     * Get the ringtone for chat invitation\n     *\n     * @return Ringtone URI or null if there is no ringtone\n     */\r\n\tpublic String getChatInvitationRingtone() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.CHAT_INVITATION_RINGTONE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the chat invitation ringtone\n     *\n     * @param uri Ringtone URI\n     */\r\n\tpublic void setChatInvitationRingtone(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CHAT_INVITATION_RINGTONE, uri);\r\n\t\t}\r\n\t}\n\n    /**\n     * Is phone vibrate for chat invitation\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPhoneVibrateForChatInvitation() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CHAT_INVITATION_VIBRATE));\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set phone vibrate for chat invitation\n     *\n     * @param vibrate Vibrate state\n     */\r\n\tpublic void setPhoneVibrateForChatInvitation(boolean vibrate) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.CHAT_INVITATION_VIBRATE, Boolean.toString(vibrate));\r\n\t\t}\r\n\t}\r\n\r\n\t/**\n     * Is auto accept mode for chat invitations activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isAutoAcceptModeForChatInvitation(){\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CHAT_INVITATION_AUTO_ACCEPT));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set auto accept mode for chat invitations\n     *\n     * @param auto Auto accept mode\n     */\r\n\tpublic void setAutoAcceptModeForChatInvitation(boolean auto) {\r\n\t\tif (instance 
!= null) {\r\n\t\t\twriteParameter(RcsSettingsData.CHAT_INVITATION_AUTO_ACCEPT, Boolean.toString(auto));\r\n\t\t}\r\n\t}\n\n    /**\n     * Get the pre-defined freetext 1\n     *\n     * @return String\n     */\r\n\tpublic String getPredefinedFreetext1() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.FREETEXT1);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Set the pre-defined freetext 1\n     *\n     * @param txt Text\n     */\r\n\tpublic void setPredefinedFreetext1(String txt) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.FREETEXT1, txt);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get the pre-defined freetext 2\n     *\n     * @return String\n     */\r\n\tpublic String getPredefinedFreetext2() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.FREETEXT2);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the pre-defined freetext 2\n     *\n     * @param txt Text\n     */\r\n\tpublic void setPredefinedFreetext2(String txt) {\r\n        if (instance != null) {\n            writeParameter(RcsSettingsData.FREETEXT2, txt);\n        }\r\n\t}\n\n    /**\n     * Get the pre-defined freetext 3\n     *\n     * @return String\n     */\r\n\tpublic String getPredefinedFreetext3() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.FREETEXT3);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Set the pre-defined freetext 3\n     *\n     * @param txt Text\n     */\r\n\tpublic void setPredefinedFreetext3(String txt) {\r\n        if (instance != null) {\n            writeParameter(RcsSettingsData.FREETEXT3, txt);\n        }\r\n\t}\n\n    /**\n     * Get the pre-defined freetext 4\n     *\n     * @return String\n     */\r\n\tpublic String getPredefinedFreetext4() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = 
readParameter(RcsSettingsData.FREETEXT4);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Set the pre-defined freetext 4\n     *\n     * @param txt Text\n     */\r\n\tpublic void setPredefinedFreetext4(String txt) {\r\n        if (instance != null) {\n            writeParameter(RcsSettingsData.FREETEXT4, txt);\n        }\r\n\t}\n\n    /**\n     * Get user profile username (i.e. username part of the IMPU)\n     *\n     * @return Username part of SIP-URI\n     */\r\n\tpublic String getUserProfileImsUserName() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.USERPROFILE_IMS_USERNAME);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set user profile IMS username (i.e. username part of the IMPU)\n     *\n     * @param value Value\n     */\r\n\tpublic void setUserProfileImsUserName(String value) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.USERPROFILE_IMS_USERNAME, value);\r\n\t\t}\r\n    }\r\n\r\n\t/**\n     * Get user profile IMS display name associated to IMPU\n     *\n     * @return String\n     */\r\n\tpublic String getUserProfileImsDisplayName() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.USERPROFILE_IMS_DISPLAY_NAME);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set user profile IMS display name associated to IMPU\n     *\n     * @param value Value\n     */\r\n\tpublic void setUserProfileImsDisplayName(String value) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.USERPROFILE_IMS_DISPLAY_NAME, value);\r\n\t\t}\r\n    }\r\n\r\n\t/**\n     * Get user profile IMS private Id (i.e. 
IMPI)\n     *\n     * @return SIP-URI\n     */\r\n\tpublic String getUserProfileImsPrivateId() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.USERPROFILE_IMS_PRIVATE_ID);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set user profile IMS private Id (i.e. IMPI)\n     *\n     * @param uri SIP-URI\n     */\r\n\tpublic void setUserProfileImsPrivateId(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.USERPROFILE_IMS_PRIVATE_ID, uri);\r\n\t\t}\r\n    }\r\n\r\n\t/**\n     * Get user profile IMS password\n     *\n     * @return String\n     */\r\n\tpublic String getUserProfileImsPassword() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.USERPROFILE_IMS_PASSWORD);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set user profile IMS password\n     *\n     * @param pwd Password\n     */\r\n\tpublic void setUserProfileImsPassword(String pwd) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.USERPROFILE_IMS_PASSWORD, pwd);\r\n\t\t}\r\n    }\r\n\r\n\t/**\n     * Get user profile IMS home domain\n     *\n     * @return Domain\n     */\r\n\tpublic String getUserProfileImsDomain() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.USERPROFILE_IMS_HOME_DOMAIN);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set user profile IMS home domain\n     *\n     * @param domain Domain\n     */\r\n\tpublic void setUserProfileImsDomain(String domain) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.USERPROFILE_IMS_HOME_DOMAIN, domain);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get IMS proxy address for mobile access\n     *\n     * @return Address\n     */\r\n\tpublic String getImsProxyAddrForMobile() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = 
readParameter(RcsSettingsData.IMS_PROXY_ADDR_MOBILE);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set IMS proxy address for mobile access\n     *\n     * @param addr Address\n     */\r\n\tpublic void setImsProxyAddrForMobile(String addr) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.IMS_PROXY_ADDR_MOBILE, addr);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get IMS proxy port for mobile access\n     *\n     * @return Port\n     */\n\tpublic int getImsProxyPortForMobile() {\n\t\tint result = 5060;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.IMS_PROXY_PORT_MOBILE));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set IMS proxy port for mobile access\n     *\n     * @param port Port number\n     */\n\tpublic void setImsProxyPortForMobile(int port) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.IMS_PROXY_PORT_MOBILE, \"\" + port);\n\t\t}\n\t}\n\n\t/**\n     * Get IMS proxy address for Wi-Fi access\n     *\n     * @return Address\n     */\n\tpublic String getImsProxyAddrForWifi() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.IMS_PROXY_ADDR_WIFI);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set IMS proxy address for Wi-Fi access\n     *\n     * @param addr Address\n     */\n\tpublic void setImsProxyAddrForWifi(String addr) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.IMS_PROXY_ADDR_WIFI, addr);\n\t\t}\n\t}\n\n\t/**\n     * Get IMS proxy port for Wi-Fi access\n     *\n     * @return Port\n     */\n\tpublic int getImsProxyPortForWifi() {\n\t\tint result = 5060;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.IMS_PROXY_PORT_WIFI));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set IMS proxy port for Wi-Fi access\n     *\n     * @param port Port 
number\n     */\n\tpublic void setImsProxyPortForWifi(int port) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.IMS_PROXY_PORT_WIFI, \"\" + port);\n\t\t}\n\t}\n\n\t/**\n     * Get XDM server address\n     *\n     * @return Address as <host>:<port>/<root>\n     */\r\n\tpublic String getXdmServer() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.XDM_SERVER);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set XDM server address\n     *\n     * @param addr Address as <host>:<port>/<root>\n     */\r\n\tpublic void setXdmServer(String addr) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.XDM_SERVER, addr);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get XDM server login\n     *\n     * @return String value\n     */\r\n\tpublic String getXdmLogin() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.XDM_LOGIN);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set XDM server login\n     *\n     * @param value Value\n     */\r\n\tpublic void setXdmLogin(String value) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.XDM_LOGIN, value);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get XDM server password\n     *\n     * @return String value\n     */\r\n\tpublic String getXdmPassword() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.XDM_PASSWORD);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set XDM server password\n     *\n     * @param value Value\n     */\r\n\tpublic void setXdmPassword(String value) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.XDM_PASSWORD, value);\r\n\t\t}\r\n\t}\n\n    /**\n     * Get IM conference URI\n     *\n     * @return SIP-URI\n     */\r\n\tpublic String getImConferenceUri() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult 
= readParameter(RcsSettingsData.IM_CONF_URI);\r\n\t\t}\r\n\t\treturn result;\r\n    }\r\n\r\n\t/**\n     * Set IM conference URI\n     *\n     * @param uri SIP-URI\n     */\r\n\tpublic void setImConferenceUri(String uri) {\r\n\t\tif (instance != null) {\r\n\t\t\twriteParameter(RcsSettingsData.IM_CONF_URI, uri);\r\n\t\t}\r\n\t}\r\n\n    /**\n     * Get end user confirmation request URI\n     *\n     * @return SIP-URI\n     */\n\tpublic String getEndUserConfirmationRequestUri() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.ENDUSER_CONFIRMATION_URI);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set end user confirmation request\n     *\n     * @param uri SIP-URI\n     */\n\tpublic void setEndUserConfirmationRequestUri(String uri) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.ENDUSER_CONFIRMATION_URI, uri);\n\t\t}\n\t}\n\t\n\t/**\n     * Get country code\n     *\n     * @return Country code\n     */\n\tpublic String getCountryCode() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.COUNTRY_CODE);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set country code\n     *\n     * @param code Country code\n     */\n\tpublic void setCountryCode(String code) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.COUNTRY_CODE, code);\n\t\t}\n    }\n\n\t/**\n     * Get country area code\n     *\n     * @return Area code\n     */\n\tpublic String getCountryAreaCode() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.COUNTRY_AREA_CODE);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set country area code\n     *\n     * @param code Area code\n     */\n\tpublic void setCountryAreaCode(String code) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.COUNTRY_AREA_CODE, code);\n\t\t}\n    }\n\n\t/**\n     * Get my capabilities\n     *\n     * @return 
capability\n     */\n\tpublic Capabilities getMyCapabilities(){\n\t\tCapabilities capabilities = new Capabilities();\n\n\t\t// Add default capabilities\n\t\tcapabilities.setCsVideoSupport(isCsVideoSupported());\n\t\tcapabilities.setFileTransferSupport(isFileTransferSupported());\n\t\tcapabilities.setImageSharingSupport(isImageSharingSupported());\n\t\tcapabilities.setImSessionSupport(isImSessionSupported());\n\t\tcapabilities.setPresenceDiscoverySupport(isPresenceDiscoverySupported());\n\t\tcapabilities.setSocialPresenceSupport(isSocialPresenceSupported());\n\t\tcapabilities.setVideoSharingSupport(isVideoSharingSupported());\n\t\tcapabilities.setTimestamp(System.currentTimeMillis());\n\n\t\t// Add extensions\n\t\tString exts = getSupportedRcsExtensions();\n\t\tif ((exts != null) && (exts.length() > 0)) {\n\t\t\tString[] ext = exts.split(\",\");\n\t\t\tfor(int i=0; i < ext.length; i++) {\n\t\t\t\tcapabilities.addSupportedExtension(ext[i]);\n\t\t\t}\n\t\t}\n\n\t\treturn capabilities;\n\t}\n\r\n\t/**\n     * Get max photo-icon size\n     *\n     * @return Size in kilobytes\n     */\r\n\tpublic int getMaxPhotoIconSize() {\r\n\t\tint result = 256;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_PHOTO_ICON_SIZE));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max freetext length\n     *\n     * @return Number of char\n     */\r\n\tpublic int getMaxFreetextLength() {\r\n\t\tint result = 100;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_FREETXT_LENGTH));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max number of participants in a group chat\n     *\n     * @return Number of participants\n     */\r\n\tpublic int getMaxChatParticipants() {\r\n\t\tint result = 5;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = 
Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_PARTICIPANTS));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get max length of a chat message\n     *\n     * @return Number of char\n     */\r\n\tpublic int getMaxChatMessageLength() {\r\n\t\tint result = 100;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_MSG_LENGTH));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get idle duration of a chat session\n     *\n     * @return Duration in seconds\n     */\r\n\tpublic int getChatIdleDuration() {\r\n\t\tint result = 120;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.CHAT_IDLE_DURATION));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max file transfer size\n     *\n     * @return Size in kilobytes\n     */\r\n\tpublic int getMaxFileTransferSize() {\r\n\t\tint result = 2048;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_FILE_TRANSFER_SIZE));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get warning threshold for max file transfer size\n     *\n     * @return Size in kilobytes\n     */\n\tpublic int getWarningMaxFileTransferSize() {\n\t\tint result = 2048;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.WARN_FILE_TRANSFER_SIZE));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get max image share size\n     *\n     * @return Size in kilobytes\n     */\r\n\tpublic int getMaxImageSharingSize() {\r\n\t\tint result = 2048;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_IMAGE_SHARE_SIZE));\r\n\t\t\t} catch(Exception e) 
{}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max duration of a video share\n     *\n     * @return Duration in seconds\n     */\r\n\tpublic int getMaxVideoShareDuration() {\r\n\t\tint result = 600;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_VIDEO_SHARE_DURATION));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max number of simultaneous chat sessions\n     *\n     * @return Number of sessions\n     */\r\n\tpublic int getMaxChatSessions() {\r\n\t\tint result = 1;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_SESSIONS));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get max number of simultaneous file transfer sessions\n     *\n     * @return Number of sessions\n     */\r\n\tpublic int getMaxFileTransferSessions() {\r\n\t\tint result = 1;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_FILE_TRANSFER_SESSIONS));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Is SMS fallback service activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isSmsFallbackServiceActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.SMS_FALLBACK_SERVICE));\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n\t/**\n     * Is Store & Forward service warning activated\n     *\n     * @return Boolean\n     */\n\tpublic boolean isStoreForwardWarningActivated() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.WARN_SF_SERVICE));\n\t\t}\n\t\treturn result;\n\t}\n\r\n\t/**\n     * Get IM session start mode\n     *\n     * @return Integer (1: The 200 OK is sent when 
the receiver starts to type a message back\n     * in the chat window. 2: The 200 OK is sent when the receiver sends a message)\n     */\n\tpublic int getImSessionStartMode() {\n\t\tint result = 1;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.IM_SESSION_START));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n\t * Get max number of entries per contact in the chat log\n\t * \n\t * @return Number\n\t */\n\tpublic int getMaxChatLogEntriesPerContact() {\n\t\tint result = 200;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_CHAT_LOG_ENTRIES));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n\t * Get max number of entries per contact in the richcall log\n\t * \n\t * @return Number\n\t */\n\tpublic int getMaxRichcallLogEntriesPerContact() {\n\t\tint result = 200;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MAX_RICHCALL_LOG_ENTRIES));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\t\n    /**\n     * Get polling period used before each IMS service check (e.g. 
test subscription state for presence service)\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getImsServicePollingPeriod(){\r\n\t\tint result = 300;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.IMS_SERVICE_POLLING_PERIOD));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default SIP listening port\n     *\n     * @return Port\n     */\r\n\tpublic int getSipListeningPort() {\r\n\t\tint result = 5060;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_DEFAULT_PORT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get default SIP protocol for mobile\n     * \n     * @return Protocol (udp | tcp | tls)\n     */\r\n\tpublic String getSipDefaultProtocolForMobile() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n            result = readParameter(RcsSettingsData.SIP_DEFAULT_PROTOCOL_FOR_MOBILE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get default SIP protocol for wifi\n     * \n     * @return Protocol (udp | tcp | tls)\n     */\n    public String getSipDefaultProtocolForWifi() {\n        String result = null;\n        if (instance != null) {\n            result = readParameter(RcsSettingsData.SIP_DEFAULT_PROTOCOL_FOR_WIFI);\n        }\n        return result;\n    }\n\n    /**\n     * Get TLS Certificate root\n     * \n     * @return Path of the certificate\n     */\n    public String getTlsCertificateRoot() {\n        String result = null;\n        if (instance != null) {\n            result = readParameter(RcsSettingsData.TLS_CERTIFICATE_ROOT);\n        }\n        return result;\n    }\n\n    /**\n     * Get TLS Certificate intermediate\n     * \n     * @return Path of the certificate\n     */\n    public String getTlsCertificateIntermediate() {\n        String result = null;\n        if (instance 
!= null) {\n            result = readParameter(RcsSettingsData.TLS_CERTIFICATE_INTERMEDIATE);\n        }\n        return result;\n    }\n\n    /**\n     * Get SIP transaction timeout used to wait SIP response\n     * \n     * @return Timeout in seconds\n     */\r\n\tpublic int getSipTransactionTimeout() {\r\n\t\tint result = 30;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_TRANSACTION_TIMEOUT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default MSRP port\n     *\n     * @return Port\n     */\r\n\tpublic int getDefaultMsrpPort() {\r\n\t\tint result = 20000;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MSRP_DEFAULT_PORT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default RTP port\n     *\n     * @return Port\n     */\r\n\tpublic int getDefaultRtpPort() {\r\n\t\tint result = 10000;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.RTP_DEFAULT_PORT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get MSRP transaction timeout used to wait MSRP response\n     *\n     * @return Timeout in seconds\n     */\r\n\tpublic int getMsrpTransactionTimeout() {\r\n\t\tint result = 5;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.MSRP_TRANSACTION_TIMEOUT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default expire period for REGISTER\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getRegisterExpirePeriod() {\r\n\t\tint result = 3600;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_EXPIRE_PERIOD));\r\n\t\t\t} catch(Exception 
e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get registration retry base time\n     *\n     * @return Time in seconds\n     */\n\tpublic int getRegisterRetryBaseTime() {\n\t\tint result = 30;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_RETRY_BASE_TIME));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get registration retry max time\n     *\n     * @return Time in seconds\n     */\n\tpublic int getRegisterRetryMaxTime() {\n\t\tint result = 1800;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.REGISTER_RETRY_MAX_TIME));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get default expire period for PUBLISH\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getPublishExpirePeriod() {\r\n\t\tint result = 3600;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.PUBLISH_EXPIRE_PERIOD));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\n\t/**\n     * Get revoke timeout before to unrevoke a revoked contact\n     *\n     * @return Timeout in seconds\n     */\r\n\tpublic int getRevokeTimeout() {\r\n\t\tint result = 300;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.REVOKE_TIMEOUT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get IMS authentication procedure for mobile access\n     *\n     * @return Authentication procedure\n     */\r\n\tpublic String getImsAuhtenticationProcedureForMobile() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.IMS_AUTHENT_PROCEDURE_MOBILE);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\n\t/**\n     * Get IMS authentication procedure for Wi-Fi access\n     *\n     * 
@return Authentication procedure\n     */\n\tpublic String getImsAuhtenticationProcedureForWifi() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.IMS_AUTHENT_PROCEDURE_WIFI);\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Is Tel-URI format used\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isTelUriFormatUsed() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.TEL_URI_FORMAT));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get ringing period\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getRingingPeriod() {\r\n\t\tint result = 120;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.RINGING_SESSION_PERIOD));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default expire period for SUBSCRIBE\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getSubscribeExpirePeriod() {\r\n\t\tint result = 3600;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SUBSCRIBE_EXPIRE_PERIOD));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get \"Is-composing\" timeout for chat service\n     *\n     * @return Timer in seconds\n     */\r\n\tpublic int getIsComposingTimeout() {\r\n\t\tint result = 15;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.IS_COMPOSING_TIMEOUT));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get default expire period for INVITE (session refresh)\n     *\n     * @return Period in seconds\n     */\r\n\tpublic int getSessionRefreshExpirePeriod() {\r\n\t\tint result = 3600;\r\n\t\tif (instance != null) {\r\n\t\t\ttry {\r\n\t\t\t\tresult = 
Integer.parseInt(readParameter(RcsSettingsData.SESSION_REFRESH_EXPIRE_PERIOD));\r\n\t\t\t} catch(Exception e) {}\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Is permanente state mode activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isPermanentStateModeActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.PERMANENT_STATE_MODE));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Is trace activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isTraceActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.TRACE_ACTIVATED));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\r\n\r\n\t/**\n     * Get trace level\n     *\n     * @return trace level\n     */\r\n\tpublic String getTraceLevel() {\r\n\t\tString result = null;\r\n\t\tif (instance != null) {\r\n\t\t\tresult = readParameter(RcsSettingsData.TRACE_LEVEL);\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Is media trace activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isSipTraceActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.SIP_TRACE_ACTIVATED));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get SIP trace file\n     *\n     * @return SIP trace file\n     */\n    public String getSipTraceFile() {\n        String result = \"/sdcard/sip.txt\";\n        if (instance != null) {\n            try {\n                result = readParameter(RcsSettingsData.SIP_TRACE_FILE);\n            } catch(Exception e) {}\n        }\n        return result;\n    }\n\t\n    /**\n     * Is media trace activated\n     *\n     * @return Boolean\n     */\r\n\tpublic boolean isMediaTraceActivated() {\r\n\t\tboolean result = false;\r\n\t\tif (instance != null) {\n\t\t\tresult = 
Boolean.parseBoolean(readParameter(RcsSettingsData.MEDIA_TRACE_ACTIVATED));\r\n\t\t}\r\n\t\treturn result;\r\n\t}\n\n    /**\n     * Get capability refresh timeout used to avoid too many requests in a short time\n     *\n     * @return Timeout in seconds\n     */\n\tpublic int getCapabilityRefreshTimeout() {\n\t\tint result = 1;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_REFRESH_TIMEOUT));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get capability expiry timeout used to decide when to refresh contact capabilities\n     *\n     * @return Timeout in seconds\n     */\n\tpublic int getCapabilityExpiryTimeout() {\n\t\tint result = 3600;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_EXPIRY_TIMEOUT));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Get capability polling period used to refresh contacts capabilities\n     *\n     * @return Timeout in seconds\n     */\n\tpublic int getCapabilityPollingPeriod() {\n\t\tint result = 3600;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.CAPABILITY_POLLING_PERIOD));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Is CS video supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isCsVideoSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_CS_VIDEO));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is file transfer supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isFileTransferSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_FILE_TRANSFER));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is IM 
session supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isImSessionSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_IM_SESSION));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is image sharing supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isImageSharingSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_IMAGE_SHARING));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is video sharing supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isVideoSharingSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_VIDEO_SHARING));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is presence discovery supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isPresenceDiscoverySupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_PRESENCE_DISCOVERY));\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Is social presence supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isSocialPresenceSupported() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.CAPABILITY_SOCIAL_PRESENCE));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get supported RCS extensions\n     *\n     * @return List of extensions (semicolon separated)\n     */\n\tpublic String getSupportedRcsExtensions() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\treturn readParameter(RcsSettingsData.CAPABILITY_RCS_EXTENSIONS);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Set supported RCS extensions\n     *\n     * @param extensions List of extensions 
(semicolon separated)\n     */\n\tpublic void setSupportedRcsExtensions(String extensions) {\n\t\tif (instance != null) {\n\t\t\twriteParameter(RcsSettingsData.CAPABILITY_RCS_EXTENSIONS, extensions);\n\t\t}\n    }\n\n\t/**\n     * Is IM always-on thanks to the Store & Forward functionality\n     *\n     * @return Boolean\n     */\n\tpublic boolean isImAlwaysOn() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.IM_CAPABILITY_ALWAYS_ON));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is IM reports activated\n     *\n     * @return Boolean\n     */\n\tpublic boolean isImReportsActivated() {\n\t\tboolean result = false;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.IM_USE_REPORTS));\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Get network access\n     *\n     * @return Network type\n     */\n\tpublic int getNetworkAccess() {\n\t\tint result = RcsSettingsData.ANY_ACCESS;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.NETWORK_ACCESS));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Get SIP timer T1\n     *\n     * @return Timer in milliseconds\n     */\n\tpublic int getSipTimerT1() {\n\t\tint result = 2000;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T1));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Get SIP timer T2\n     *\n     * @return Timer in milliseconds\n     */\n\tpublic int getSipTimerT2() {\n\t\tint result = 16000;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T2));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Get SIP timer T4\n     *\n     * @return Timer in milliseconds\n     */\n\tpublic int getSipTimerT4() 
{\n\t\tint result = 17000;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_TIMER_T4));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n\t/**\n     * Is SIP keep-alive enabled\n     *\n     * @return Boolean\n     */\n\tpublic boolean isSipKeepAliveEnabled() {\n\t\tboolean result = true;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.SIP_KEEP_ALIVE));\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Get SIP keep-alive period\n     *\n     * @return Period in seconds\n     */\n\tpublic int getSipKeepAlivePeriod() {\n\t\tint result = 60;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.SIP_KEEP_ALIVE_PERIOD));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Get APN used to connect to RCS platform\n     *\n     * @return APN (null means any APN may be used to connect to RCS)\n     */\n\tpublic String getNetworkApn() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.RCS_APN);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Get operator authorized to connect to RCS platform\n     *\n     * @return SIM operator name (null means any SIM operator is authorized to connect to RCS)\n     */\n\tpublic String getNetworkOperator() {\n\t\tString result = null;\n\t\tif (instance != null) {\n\t\t\tresult = readParameter(RcsSettingsData.RCS_OPERATOR);\n\t\t}\n\t\treturn result;\n    }\n\n\t/**\n     * Is GRUU supported\n     *\n     * @return Boolean\n     */\n\tpublic boolean isGruuSupported() {\n\t\tboolean result = true;\n\t\tif (instance != null) {\n\t\t\tresult = Boolean.parseBoolean(readParameter(RcsSettingsData.GRUU));\n\t\t}\n\t\treturn result;\n\t}\n\t\n    /**\n     * Is CPU Always_on activated\n     *\n     * @return Boolean\n     */\n    public boolean isCpuAlwaysOn() {\n        boolean result = 
false;\n        if (instance != null) {\n            result = Boolean.parseBoolean(readParameter(RcsSettingsData.CPU_ALWAYS_ON));\n        }\n        return result;\n    }\n\n\t/**\n     * Get auto configuration mode\n     *\n     * @return Mode\n     */\n\tpublic int getAutoConfigMode() {\n\t\tint result = RcsSettingsData.NO_AUTO_CONFIG;\n\t\tif (instance != null) {\n\t\t\ttry {\n\t\t\t\tresult = Integer.parseInt(readParameter(RcsSettingsData.AUTO_CONFIG_MODE));\n\t\t\t} catch(Exception e) {}\n\t\t}\n\t\treturn result;\n\t}\n\n    /**\n     * Remove user profile information\n     */\n    public void removeUserProfile() {\n        setServiceActivationState(false);\n        setUserProfileImsUserName(\"\");\n        setUserProfileImsDomain(\"\");\n        setUserProfileImsPassword(\"\");\n        setImsProxyAddrForMobile(\"\");\n        setImsProxyPortForMobile(5060);\n        setImsProxyAddrForWifi(\"\");\n        setImsProxyPortForWifi(5060);\n        setUserProfileImsDisplayName(\"\");\n        setUserProfileImsPrivateId(\"\");\n        setXdmLogin(\"\");\n        setXdmPassword(\"\");\n        setXdmServer(\"\");\n    }\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/provider/settings/RcsSettingsData.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.provider.settings;\r\n\r\nimport android.net.ConnectivityManager;\nimport android.net.Uri;\n\n/**\n * RCS settings data constants\n *\n * @author jexa7410\n */\r\npublic class RcsSettingsData {\r\n\t/**\r\n\t * Database URI\r\n\t */\r\n    static final Uri CONTENT_URI = Uri.parse(\"content://com.orangelabs.rcs.settings/settings\");\n\n\t/**\r\n\t * Column name\r\n\t */\r\n\tstatic final String KEY_ID = \"_id\";\r\n\r\n\t/**\r\n\t * Column name\r\n\t */\r\n\tstatic final String KEY_KEY = \"key\";\r\n\r\n\t/**\r\n\t * Column name\r\n\t */\r\n\tstatic final String KEY_VALUE = \"value\";\r\n\r\n\t// ---------------------------------------------------------------------------\n\t// Constants\n\t// ---------------------------------------------------------------------------\n\n\t/**\n\t * Boolean value \"true\"\n\t */\n\tpublic static final String TRUE = Boolean.toString(true);\n\n\t/**\n\t * Boolean value \"false\"\n\t */\n    public static final String FALSE = Boolean.toString(false);\n\n\t/**\n\t * GIBA authentication\n\t */\n\tpublic static final String GIBA_AUTHENT = \"GIBA\";\n\n\t/**\n\t * HTTP Digest 
authentication\n\t */\n    public static final String DIGEST_AUTHENT = \"DIGEST\";\n\n\t/**\n\t * Any access\n\t */\n    public static final int ANY_ACCESS = -1;\n\n\t/**\n\t * Mobile access\n\t */\n    public static final int MOBILE_ACCESS = ConnectivityManager.TYPE_MOBILE;\n\n\t/**\n\t * Wi-Fi access\n\t */\n    public static final int WIFI_ACCESS = ConnectivityManager.TYPE_WIFI;\n\n    /**\n     * Folder path for certificate\n     */\n    public static final String CERTIFICATE_FOLDER_PATH = \"/sdcard/\";\n\n    /**\n     * File type for certificate\n     */\n    public static final String CERTIFICATE_FILE_TYPE = \".crt\";\n\n\t/**\n\t * No auto config mode\n\t */\n    public static final int NO_AUTO_CONFIG = 0;\n\n\t/**\n\t * HTTPS auto config mode\n\t */\n    public static final int HTTPS_AUTO_CONFIG = 1;\n\n    // ---------------------------------------------------------------------------\r\n\t// UI settings\r\n\t// ---------------------------------------------------------------------------\r\n\r\n\t/**\n     * Activate or not the RCS service\n     */\r\n\tpublic static final String SERVICE_ACTIVATED = \"ServiceActivated\";\r\n\r\n\t/**\n     * Roaming authorization parameter which indicates if the RCS service may be used or not in roaming\n     */\r\n\tpublic static final String ROAMING_AUTHORIZED = \"RoamingAuthorized\";\r\n\r\n\t/**\n     * Ringtone which is played when a social presence sharing invitation is received\n     */\r\n\tpublic static final String PRESENCE_INVITATION_RINGTONE = \"PresenceInvitationRingtone\";\n\n    /**\n     * Vibrate or not when a social presence sharing invitation is received\n     */\r\n\tpublic static final String PRESENCE_INVITATION_VIBRATE = \"PresenceInvitationVibrate\";\n\n    /**\n     * Ringtone which is played when a content sharing invitation is received\n     */\r\n\tpublic static final String CSH_INVITATION_RINGTONE = \"CShInvitationRingtone\";\n\n    /**\n     * Vibrate or not when a content sharing invitation is 
received\n     */\r\n\tpublic static final String CSH_INVITATION_VIBRATE = \"CShInvitationVibrate\";\n\n    /**\n     * Make a beep or not when content sharing is available during a call\n     */\r\n\tpublic static final String CSH_AVAILABLE_BEEP = \"CShAvailableBeep\";\n\n    /**\n     * Video format for video share\n     */\r\n\tpublic static final String CSH_VIDEO_FORMAT = \"CShVideoFormat\";\r\n\r\n\t/**\n     * Video size for video share\n     */\r\n\tpublic static final String CSH_VIDEO_SIZE = \"CShVideoSize\";\r\n\r\n\t/**\n     * Ringtone which is played when a file transfer invitation is received\n     */\r\n\tpublic static final String FILETRANSFER_INVITATION_RINGTONE = \"FileTransferInvitationRingtone\";\n\n    /**\n     * Vibrate or not when a file transfer invitation is received\n     */\r\n\tpublic static final String FILETRANSFER_INVITATION_VIBRATE = \"FileTransferInvitationVibrate\";\r\n\r\n\t/**\n     * Ringtone which is played when a chat invitation is received\n     */\r\n\tpublic static final String CHAT_INVITATION_RINGTONE = \"ChatInvitationRingtone\";\n\n    /**\n     * Vibrate or not when a chat invitation is received\n     */\r\n\tpublic static final String CHAT_INVITATION_VIBRATE = \"ChatInvitationVibrate\";\n\n    /**\n     * Auto-accept mode for chat invitation\n     */\r\n\tpublic static final String CHAT_INVITATION_AUTO_ACCEPT = \"ChatInvitationAutoAccept\";\r\n\r\n\t/**\n     * Predefined freetext\n     */\r\n\tpublic static final String FREETEXT1 = \"Freetext1\";\r\n\r\n\t/**\n     * Predefined freetext\n     */\r\n\tpublic static final String FREETEXT2 = \"Freetext2\";\r\n\r\n\t/**\n     * Predefined freetext\n     */\r\n\tpublic static final String FREETEXT3 = \"Freetext3\";\n\n    /**\n     * Predefined freetext\n     */\r\n\tpublic static final String FREETEXT4 = \"Freetext4\";\r\n\r\n\t// ---------------------------------------------------------------------------\r\n\t// Service settings\r\n\t// 
---------------------------------------------------------------------------\r\n\r\n\t/**\r\n\t * Max photo-icon size\r\n\t */\r\n\tpublic static final String MAX_PHOTO_ICON_SIZE = \"MaxPhotoIconSize\";\r\n\r\n\t/**\r\n\t * Max length of the freetext\r\n\t */\r\n\tpublic static final String MAX_FREETXT_LENGTH = \"MaxFreetextLength\";\r\n\r\n\t/**\r\n\t * Max number of participants in a group chat\r\n\t */\r\n\tpublic static final String MAX_CHAT_PARTICIPANTS = \"MaxChatParticipants\";\r\n\r\n\t/**\r\n\t * Max length of a chat message\r\n\t */\r\n\tpublic static final String MAX_CHAT_MSG_LENGTH = \"MaxChatMessageLength\";\r\n\r\n\t/**\r\n\t * Idle duration of a chat session\r\n\t */\r\n\tpublic static final String CHAT_IDLE_DURATION = \"ChatIdleDuration\";\r\n\r\n\t/**\r\n\t * Max size of a file transfer\r\n\t */\r\n\tpublic static final String MAX_FILE_TRANSFER_SIZE = \"MaxFileTransferSize\";\r\n\r\n\t/**\n\t * Warning threshold for file transfer size\n\t */\n\tpublic static final String WARN_FILE_TRANSFER_SIZE = \"WarnFileTransferSize\";\n\n\t/**\r\n\t * Max size of an image share\r\n\t */\r\n\tpublic static final String MAX_IMAGE_SHARE_SIZE = \"MaxImageShareSize\";\r\n\r\n\t/**\r\n\t * Max duration of a video share\r\n\t */\r\n\tpublic static final String MAX_VIDEO_SHARE_DURATION = \"MaxVideoShareDuration\";\r\n\r\n\t/**\r\n\t * Max number of simultaneous chat sessions\r\n\t */\r\n\tpublic static final String MAX_CHAT_SESSIONS = \"MaxChatSessions\";\r\n\r\n\t/**\r\n\t * Max number of simultaneous file transfer sessions\r\n\t */\r\n\tpublic static final String MAX_FILE_TRANSFER_SESSIONS = \"MaxFileTransferSessions\";\r\n\r\n\t/**\r\n\t * Activate or not SMS fallback service\r\n\t */\r\n\tpublic static final String SMS_FALLBACK_SERVICE = \"SmsFallbackService\";\n\n\t/**\n\t * Display a warning if Store & Forward service is activated\n\t */\n\tpublic static final String WARN_SF_SERVICE = \"StoreForwardServiceWarning\";\n\t\n\t/**\n\t * Define when the chat receiver 
sends the 200 OK back to the sender\n\t */\n\tpublic static final String IM_SESSION_START = \"ImSessionStart\";\n\n\t/**\n\t * Max entries for chat log\n\t */\n\tpublic static final String MAX_CHAT_LOG_ENTRIES = \"MaxChatLogEntries\";\n\n\t/**\n\t * Max entries for richcall log\n\t */\n\tpublic static final String MAX_RICHCALL_LOG_ENTRIES = \"MaxRichcallLogEntries\";\t\n\t\r\n\t// ---------------------------------------------------------------------------\r\n\t// User profile settings\r\n\t// ---------------------------------------------------------------------------\r\n\n\t/**\r\n\t * IMS username or username part of the IMPU (for HTTP Digest only)\r\n\t */\r\n\tpublic static final String USERPROFILE_IMS_USERNAME = \"ImsUsername\";\n\n    /**\n     * IMS display name\n     */\r\n\tpublic static final String USERPROFILE_IMS_DISPLAY_NAME = \"ImsDisplayName\";\n\n    /**\n     * IMS private URI or IMPI (for HTTP Digest only)\n     */\r\n\tpublic static final String USERPROFILE_IMS_PRIVATE_ID = \"ImsPrivateId\";\r\n\r\n\t/**\n     * IMS password (for HTTP Digest only)\n     */\r\n\tpublic static final String USERPROFILE_IMS_PASSWORD = \"ImsPassword\";\r\n\r\n\t/**\r\n\t * IMS home domain (for HTTP Digest only)\r\n\t */\r\n\tpublic static final String USERPROFILE_IMS_HOME_DOMAIN = \"ImsHomeDomain\";\r\n\r\n\t/**\r\n\t * P-CSCF or outbound proxy address for mobile access\r\n\t */\r\n\tpublic static final String IMS_PROXY_ADDR_MOBILE = \"ImsOutboundProxyAddrForMobile\";\r\n\r\n\t/**\n\t * P-CSCF or outbound proxy port for mobile access\n\t */\n\tpublic static final String IMS_PROXY_PORT_MOBILE = \"ImsOutboundProxyPortForMobile\";\n\n\t/**\n\t * P-CSCF or outbound proxy address for Wi-Fi access\n\t */\n\tpublic static final String IMS_PROXY_ADDR_WIFI = \"ImsOutboundProxyAddrForWifi\";\n\n\t/**\n\t * P-CSCF or outbound proxy port for Wi-Fi access\n\t */\n\tpublic static final String IMS_PROXY_PORT_WIFI = \"ImsOutboundProxyPortForWifi\";\n\n\t/**\r\n\t * XDM server address 
& port\r\n\t */\r\n\tpublic static final String XDM_SERVER = \"XdmServerAddr\";\r\n\r\n\t/**\r\n\t * XDM server login (for HTTP Digest only)\r\n\t */\r\n\tpublic static final String XDM_LOGIN= \"XdmServerLogin\";\r\n\r\n\t/**\r\n\t * XDM server password (for HTTP Digest only)\r\n\t */\r\n\tpublic static final String XDM_PASSWORD = \"XdmServerPassword\";\r\n\r\n\t/**\r\n\t * IM conference URI for group chat session\r\n\t */\r\n\tpublic static final String IM_CONF_URI = \"ImConferenceUri\";\n\n\t/**\n\t * End user confirmation request URI for terms and conditions\n\t */\n\tpublic static final String ENDUSER_CONFIRMATION_URI = \"EndUserConfReqUri\";\n\t\n    /**\n     * Country code\n     */\n\tpublic static final String COUNTRY_CODE = \"CountryCode\";\n\n    /**\n     * Country area code\n     */\n\tpublic static final String COUNTRY_AREA_CODE = \"CountryAreaCode\";\n\n\t// ---------------------------------------------------------------------------\r\n\t// Stack settings\r\n\t// ---------------------------------------------------------------------------\r\n\r\n\t/**\r\n\t * Polling period used before each IMS service check (e.g. 
test subscription state for presence service)\r\n\t */\r\n\tpublic static final String IMS_SERVICE_POLLING_PERIOD = \"ImsServicePollingPeriod\";\r\n\r\n\t/**\n     * Default SIP port\n     */\r\n\tpublic static final String SIP_DEFAULT_PORT = \"SipListeningPort\";\r\n\r\n\t/**\n     * Default SIP protocol\n     */\r\n    public static final String SIP_DEFAULT_PROTOCOL_FOR_MOBILE = \"SipDefaultProtocolForMobile\";\n\n    /**\n     * Default SIP protocol\n     */\n    public static final String SIP_DEFAULT_PROTOCOL_FOR_WIFI = \"SipDefaultProtocolForWifi\";\n\n    /**\n     * TLS Certifcate root\n     */\n    public static final String TLS_CERTIFICATE_ROOT = \"TlsCertificateRoot\";\n\n    /**\n     * TLS Certifcate intermediate\n     */\n    public static final String TLS_CERTIFICATE_INTERMEDIATE = \"TlsCertificateIntermediate\";\n\r\n\t/**\r\n\t * SIP transaction timeout used to wait a SIP response\r\n\t */\r\n\tpublic static final String SIP_TRANSACTION_TIMEOUT = \"SipTransactionTimeout\";\r\n\r\n\t/**\n     * Default TCP port for MSRP session\n     */\r\n\tpublic static final String MSRP_DEFAULT_PORT = \"DefaultMsrpPort\";\r\n\r\n\t/**\n     * Default UDP port for RTP session\n     */\r\n\tpublic static final String RTP_DEFAULT_PORT = \"DefaultRtpPort\";\r\n\r\n\t/**\r\n\t * MSRP transaction timeout used to wait MSRP response\r\n\t */\r\n    public static final String MSRP_TRANSACTION_TIMEOUT = \"MsrpTransactionTimeout\";\n\r\n\t/**\n     * Registration expire period\n     */\r\n\tpublic static final String REGISTER_EXPIRE_PERIOD = \"RegisterExpirePeriod\";\r\n\r\n\t/**\n     * Registration retry base time\n     */\n\tpublic static final String REGISTER_RETRY_BASE_TIME = \"RegisterRetryBaseTime\";\n\n\t/**\n     * Registration retry max time\n     */\n\tpublic static final String REGISTER_RETRY_MAX_TIME = \"RegisterRetryMaxTime\";\n\n\t/**\r\n\t * Publish expire period\r\n\t */\r\n\tpublic static final String PUBLISH_EXPIRE_PERIOD = 
\"PublishExpirePeriod\";\r\n\r\n\t/**\n     * Revoke timeout\n     */\r\n\tpublic static final String REVOKE_TIMEOUT = \"RevokeTimeout\";\r\n\r\n\t/**\r\n\t * IMS authentication procedure for mobile access\r\n\t */\r\n\tpublic static final String IMS_AUTHENT_PROCEDURE_MOBILE = \"ImsAuhtenticationProcedureForMobile\";\r\n\r\n\t/**\n\t * IMS authentication procedure for Wi-Fi access\n\t */\n\tpublic static final String IMS_AUTHENT_PROCEDURE_WIFI = \"ImsAuhtenticationProcedureForWifi\";\n\n\t/**\r\n\t * Activate or not Tel-URI format\r\n\t */\r\n\tpublic static final String TEL_URI_FORMAT = \"TelUriFormat\";\r\n\r\n\t/**\n     * Ringing session period. At the end of the period the session is cancelled\n     */\r\n\tpublic static final String RINGING_SESSION_PERIOD = \"RingingPeriod\";\r\n\r\n\t/**\r\n\t * Subscribe expiration timeout\r\n\t */\r\n\tpublic static final String SUBSCRIBE_EXPIRE_PERIOD = \"SubscribeExpirePeriod\";\r\n\r\n\t/**\r\n\t * \"Is-composing\" timeout for chat service\r\n\t */\r\n\tpublic static final String IS_COMPOSING_TIMEOUT = \"IsComposingTimeout\";\r\n\r\n\t/**\r\n\t * SIP session refresh expire period\r\n\t */\r\n\tpublic static final String SESSION_REFRESH_EXPIRE_PERIOD = \"SessionRefreshExpirePeriod\";\r\n\r\n\t/**\r\n\t * Activate or not permanent state mode\r\n\t */\r\n\tpublic static final String PERMANENT_STATE_MODE = \"PermanentState\";\r\n\r\n\t/**\r\n\t * Activate or not the traces\r\n\t */\r\n\tpublic static final String TRACE_ACTIVATED = \"TraceActivated\";\r\n\r\n\t/**\r\n\t * Logger trace level\r\n\t */\r\n\tpublic static final String TRACE_LEVEL = \"TraceLevel\";\r\n\r\n\t/**\r\n\t * Activate or not the SIP trace\r\n\t */\r\n\tpublic static final String SIP_TRACE_ACTIVATED = \"SipTraceActivated\";\r\n\n    /**\n     * SIP trace file\n     */\n    public static final String SIP_TRACE_FILE = \"SipTraceFile\";\n\t\r\n\t/**\r\n\t * Activate or not the media trace\r\n\t */\r\n\tpublic static final String MEDIA_TRACE_ACTIVATED = 
\"MediaTraceActivated\";\r\n\n\t/**\n\t * Capability refresh timeout used to avoid too many requests in a short time\n\t */\n\tpublic static final String CAPABILITY_REFRESH_TIMEOUT = \"CapabilityRefreshTimeout\";\n\n\t/**\n\t * Capability refresh timeout used to decide when to refresh contact capabilities\n\t */\n\tpublic static final String CAPABILITY_EXPIRY_TIMEOUT = \"CapabilityExpiryTimeout\";\n\n\t/**\n\t * Polling period used to decide when to refresh contacts capabilities\n\t */\n\tpublic static final String CAPABILITY_POLLING_PERIOD = \"CapabilityPollingPeriod\";\n\n\t/**\n\t * CS video capability\n\t */\n\tpublic static final String CAPABILITY_CS_VIDEO = \"CapabilityCsVideo\";\n\n\t/**\n\t * Image sharing capability\n\t */\n\tpublic static final String CAPABILITY_IMAGE_SHARING = \"CapabilityImageShare\";\n\n\t/**\n\t * Video sharing capability\n\t */\n\tpublic static final String CAPABILITY_VIDEO_SHARING = \"CapabilityVideoShare\";\n\n\t/**\n\t * Instant Messaging session capability\n\t */\n\tpublic static final String CAPABILITY_IM_SESSION = \"CapabilityImSession\";\n\n\t/**\n\t * File transfer capability\n\t */\n\tpublic static final String CAPABILITY_FILE_TRANSFER = \"CapabilityFileTransfer\";\n\n\t/**\n\t * Presence discovery capability\n\t */\n\tpublic static final String CAPABILITY_PRESENCE_DISCOVERY = \"CapabilityPresenceDiscovery\";\n\n\t/**\n\t * Social presence capability\n\t */\n\tpublic static final String CAPABILITY_SOCIAL_PRESENCE = \"CapabilitySocialPresence\";\n\n    /**\n     * RCS extensions capability\n     */\n\tpublic static final String CAPABILITY_RCS_EXTENSIONS = \"CapabilityRcsExtensions\";\n\n\t/**\n     * Instant messaging is always on (Store & Forward server)\n     */\n\tpublic static final String IM_CAPABILITY_ALWAYS_ON = \"ImAlwaysOn\";\n\n\t/**\n     * Instant messaging use report\n     */\n\tpublic static final String IM_USE_REPORTS = \"ImUseReports\";\n\n\t/**\n\t * Network access authorized\n\t */\n\tpublic static final 
String NETWORK_ACCESS = \"NetworkAccess\";\n\n\t/**\n     * SIP stack timer T1\n     */\n\tpublic static final String SIP_TIMER_T1 = \"SipTimerT1\";\n\n\t/**\n     * SIP stack timer T2\n     */\n\tpublic static final String SIP_TIMER_T2 = \"SipTimerT2\";\n\n\t/**\n     * SIP stack timer T4\n     */\n\tpublic static final String SIP_TIMER_T4 = \"SipTimerT4\";\n\n\t/**\n     * Enable SIP keep alive\n     */\n\tpublic static final String SIP_KEEP_ALIVE = \"SipKeepAlive\";\n\n\t/**\n     * SIP keep alive period\n     */\n\tpublic static final String SIP_KEEP_ALIVE_PERIOD = \"SipKeepAlivePeriod\";\n\n\t/**\n\t * RCS APN\n\t */\n\tpublic static final String RCS_APN = \"RcsApn\";\n\n\t/**\n\t * RCS operator\n\t */\n\tpublic static final String RCS_OPERATOR = \"RcsOperator\";\t\n\n\t/**\n\t * GRUU support\n\t */\n\tpublic static final String GRUU = \"GRUU\";\n\n    /**\n     * CPU always_on support\n     */\n    public static final String CPU_ALWAYS_ON = \"CpuAlwaysOn\";\n\n    /**\n     * Auto configuration mode\n     */\n    public static final String AUTO_CONFIG_MODE = \"Autoconfig\";\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/capability/Capabilities.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.service.api.client.capability;\r\n\r\nimport java.util.ArrayList;\n\nimport android.os.Parcel;\nimport android.os.Parcelable;\n\r\n/**\r\n * Capabilities\r\n * \r\n * @author jexa7410\r\n */\r\npublic class Capabilities implements Parcelable {\r\n\t/**\r\n\t * Image sharing support\r\n\t */\r\n\tprivate boolean imageSharing = false;\r\n\t\r\n\t/**\r\n\t * Video sharing support\r\n\t */\r\n\tprivate boolean videoSharing = false;\r\n\t\r\n\t/**\r\n\t * IM session support\r\n\t */\r\n\tprivate boolean imSession = false;\r\n\r\n\t/**\r\n\t * File transfer support\r\n\t */\r\n\tprivate boolean fileTransfer = false;\r\n\t\r\n\t/**\r\n\t * CS video support\r\n\t */\r\n\tprivate boolean csVideo = false;\r\n\n\t/**\n\t * Presence discovery support\n\t */\n\tprivate boolean presenceDiscovery = false;\t\n\t\n\t/**\n\t * Social presence support\n\t */\n\tprivate boolean socialPresence = false;\t\n\n\t/**\n\t * List of supported extensions\n\t */\n\tprivate ArrayList<String> extensions = new ArrayList<String>();\n\t\r\n\t/**\r\n\t * Last capabilities update\r\n\t */\r\n\tprivate long timestamp = 
System.currentTimeMillis();\r\n\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic Capabilities() {\r\n\t}\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param source Parcelable source\r\n\t */\r\n\tpublic Capabilities(Parcel source) {\r\n\t\tthis.imageSharing = source.readInt() != 0;\r\n\t\tthis.videoSharing = source.readInt() != 0;\r\n\t\tthis.imSession = source.readInt() != 0;\r\n\t\tthis.fileTransfer = source.readInt() != 0;\r\n\t\tthis.csVideo = source.readInt() != 0;\n\t\tthis.presenceDiscovery = source.readInt() != 0;\n\t\tthis.socialPresence = source.readInt() != 0;\n\t\tthis.timestamp = source.readLong();\n\t\tsource.readStringList(this.extensions);\r\n    }\r\n\r\n\t/**\r\n\t * Describe the kinds of special objects contained in this Parcelable's\r\n\t * marshalled representation\r\n\t * \r\n\t * @return Integer\r\n\t */\r\n\tpublic int describeContents() {\r\n        return 0;\r\n    }\r\n\r\n\t/**\r\n\t * Write parcelable object\r\n\t * \r\n\t * @param dest The Parcel in which the object should be written\r\n\t * @param flags Additional flags about how the object should be written\r\n\t */\r\n    public void writeToParcel(Parcel dest, int flags) {\r\n    \tdest.writeInt(imageSharing ? 1 : 0);\r\n    \tdest.writeInt(videoSharing ? 1 : 0);\r\n    \tdest.writeInt(imSession ? 1 : 0);\r\n    \tdest.writeInt(fileTransfer ? 1 : 0);\r\n    \tdest.writeInt(csVideo ? 1 : 0);\n    \tdest.writeInt(presenceDiscovery ? 1 : 0);\n    \tdest.writeInt(socialPresence ? 
1 : 0);\r\n    \tdest.writeLong(timestamp);\n\t\tif (extensions!=null && extensions.size()>0){\n\t\t\tdest.writeStringList(extensions);\n\t\t}\r\n    }\r\n\r\n    /**\r\n     * Parcelable creator\r\n     */\r\n    public static final Parcelable.Creator<Capabilities> CREATOR\r\n            = new Parcelable.Creator<Capabilities>() {\r\n        public Capabilities createFromParcel(Parcel source) {\r\n            return new Capabilities(source);\r\n        }\r\n\r\n        public Capabilities[] newArray(int size) {\r\n            return new Capabilities[size];\r\n        }\r\n    };\t\r\n\r\n    /**\r\n\t * Is image sharing supported\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isImageSharingSupported() {\r\n\t\treturn imageSharing;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the image sharing support\r\n\t * \r\n\t * @param supported Supported \r\n\t */\r\n\tpublic void setImageSharingSupport(boolean supported) {\r\n\t\tthis.imageSharing = supported;\r\n\t}\r\n\r\n\t/**\r\n\t * Is video sharing supported\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isVideoSharingSupported() {\r\n\t\treturn videoSharing;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the video sharing support\r\n\t * \r\n\t * @param supported Supported \r\n\t */\r\n\tpublic void setVideoSharingSupport(boolean supported) {\r\n\t\tthis.videoSharing = supported;\r\n\t}\r\n\r\n\t/**\r\n\t * Is IM session supported\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isImSessionSupported() {\r\n\t\treturn imSession;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the IM session support\r\n\t * \r\n\t * @param supported Supported \r\n\t */\r\n\tpublic void setImSessionSupport(boolean supported) {\r\n\t\tthis.imSession = supported;\r\n\t}\r\n\r\n\t/**\r\n\t * Is file transfer supported\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isFileTransferSupported() {\r\n\t\treturn fileTransfer;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Set the file transfer support\r\n\t * \r\n\t * @param supported Supported 
\r\n\t */\r\n\tpublic void setFileTransferSupport(boolean supported) {\r\n\t\tthis.fileTransfer = supported;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Is CS video supported\r\n\t * \r\n\t * @return Boolean\r\n\t */\r\n\tpublic boolean isCsVideoSupported() {\r\n\t\treturn csVideo;\r\n\t}\r\n\r\n\t/**\r\n\t * Set the CS video support\r\n\t * \r\n\t * @param supported Supported \r\n\t */\r\n\tpublic void setCsVideoSupport(boolean supported) {\r\n\t\tthis.csVideo = supported;\r\n\t}\r\n\n\t/**\n\t * Is presence discovery supported\n\t * \n\t * @return Boolean\n\t */\n\tpublic boolean isPresenceDiscoverySupported() {\n\t\treturn presenceDiscovery;\n\t}\n\n\t/**\n\t * Set the presence discovery support\n\t * \n\t * @param supported Supported \n\t */\n\tpublic void setPresenceDiscoverySupport(boolean supported) {\n\t\tthis.presenceDiscovery = supported;\n\t}\n\n\t/**\n\t * Is social presence supported\n\t * \n\t * @return Boolean\n\t */\n\tpublic boolean isSocialPresenceSupported() {\n\t\treturn socialPresence;\n\t}\n\n\t/**\n\t * Set the social presence support\n\t * \n\t * @param supported Supported \n\t */\n\tpublic void setSocialPresenceSupport(boolean supported) {\n\t\tthis.socialPresence = supported;\n\t}\n\n\t/**\n\t * Add supported extension\n\t * \n\t * @param tag Feature tag\n\t */\n\tpublic void addSupportedExtension(String tag) {\n\t\textensions.add(tag);\n\t}\n\t\n\t/**\n\t * Get list of supported extensions\n\t * \n\t * @return List\n\t */\n\tpublic ArrayList<String> getSupportedExtensions() {\n\t\treturn extensions;\n\t}\n\t\r\n\t/**\r\n\t * Get the capabilities timestamp \r\n\t * \r\n\t * @return Timestamp (in milliseconds)\r\n\t */\r\n\tpublic long getTimestamp() {\r\n\t\treturn timestamp;\r\n\t}\r\n\r\n\t/**\r\n\t * Set capabilities timestamp\r\n\t * \r\n\t * @param Timestamp\r\n\t */\r\n\tpublic void setTimestamp(long timestamp) {\r\n\t\tthis.timestamp = timestamp;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Returns a string representation of the object\r\n\t * \r\n\t * @return 
String\r\n\t */\r\n\tpublic String toString() {\r\n\t\treturn \"Image_share=\" + imageSharing +\n\t\t\t\", Video_share=\" + videoSharing +\r\n\t\t\t\", FT=\" + fileTransfer +\r\n\t\t\t\", IM=\" + imSession +\r\n\t\t\t\", CS_video=\" + csVideo +\r\n\t\t\t\", Presence_discovery=\" + presenceDiscovery +\n\t\t\t\", Social_presence=\" + socialPresence +\n\t\t\t\", Timestamp=\" + timestamp;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaEventListener.aidl",
    "content": "package com.orangelabs.rcs.service.api.client.media;\r\n\r\n/**\r\n * Media event listener\r\n */\r\ninterface IMediaEventListener {\r\n\t// Media is opened\r\n\tvoid mediaOpened();\r\n\t\r\n\t// Media is closed\r\n\tvoid mediaClosed();\r\n\r\n\t// Media is started\r\n\tvoid mediaStarted();\r\n\t\r\n\t// Media is stopped\r\n\tvoid mediaStopped();\r\n\r\n\t// Media has failed\r\n\tvoid mediaError(in String error);\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaPlayer.aidl",
    "content": "package com.orangelabs.rcs.service.api.client.media;\r\n\r\nimport com.orangelabs.rcs.service.api.client.media.IMediaEventListener;\r\nimport com.orangelabs.rcs.service.api.client.media.MediaCodec;\r\n\r\n/**\r\n * Media RTP player\r\n */\r\ninterface IMediaPlayer {\r\n\t// Open the player\r\n\tvoid open(in String remoteHost, in int remotePort);\r\n\r\n\t// Close the player\r\n\tvoid close();\r\n\r\n\t// Start the player\r\n\tvoid start();\r\n\r\n\t// Stop the player\r\n\tvoid stop();\r\n\r\n\t// Returns the local RTP port\r\n\tint getLocalRtpPort();\r\n\r\n\t// Add a media listener\r\n\tvoid addListener(in IMediaEventListener listener);\r\n\r\n\t// Remove media listeners\r\n\tvoid removeAllListeners();\r\n\r\n\t// Get supported media codecs\r\n\tMediaCodec[] getSupportedMediaCodecs();\r\n\r\n\t// Get media codec\r\n\tMediaCodec getMediaCodec();\r\n\r\n\t// Set media codec\r\n\tvoid setMediaCodec(in MediaCodec mediaCodec);\r\n}"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/IMediaRenderer.aidl",
    "content": "package com.orangelabs.rcs.service.api.client.media;\r\n\r\nimport com.orangelabs.rcs.service.api.client.media.IMediaEventListener;\r\nimport com.orangelabs.rcs.service.api.client.media.MediaCodec;\r\n\r\n/**\r\n * Media RTP renderer\r\n */\r\ninterface IMediaRenderer {\r\n\t// Open the renderer\r\n\tvoid open(in String remoteHost, in int remotePort);\r\n\r\n\t// Close the renderer\r\n\tvoid close();\r\n\r\n\t// Start the renderer\r\n\tvoid start();\r\n\r\n\t// Stop the renderer\r\n\tvoid stop();\r\n\r\n\t// Returns the local RTP port\r\n\tint getLocalRtpPort();\r\n\r\n\t// Add a media listener\r\n\tvoid addListener(in IMediaEventListener listener);\r\n\r\n\t// Remove media listeners\r\n\tvoid removeAllListeners();\r\n\r\n\t// Get supported media codecs\r\n\tMediaCodec[] getSupportedMediaCodecs();\r\n\r\n    \t// Get media codec\r\n\tMediaCodec getMediaCodec();\r\n\r\n\t// Set media codec\r\n\tvoid setMediaCodec(in MediaCodec mediaCodec);\r\n}"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/MediaCodec.aidl",
    "content": "package com.orangelabs.rcs.service.api.client.media;\r\n\r\nparcelable MediaCodec;"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/MediaCodec.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.service.api.client.media;\r\n\r\nimport android.os.Bundle;\r\nimport android.os.Parcel;\r\nimport android.os.Parcelable;\r\n\r\nimport java.util.Enumeration;\r\nimport java.util.Hashtable;\r\nimport java.util.Iterator;\r\nimport java.util.Set;\r\n\r\n/**\r\n * Media Codec\r\n * \r\n * @author hlxn7157\r\n */\r\npublic class MediaCodec implements Parcelable {\r\n    /**\r\n     * Codec name\r\n     */\r\n    private String codecName;\r\n\r\n    /**\r\n     * Codec parameters\r\n     */\r\n    private Hashtable<String, String> parameters = new Hashtable<String, String>();\r\n\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param codecName Codec name\r\n     */\r\n    public MediaCodec(String codecName) {\r\n        this.codecName = codecName;\r\n    }\r\n\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param source Parcelable source\r\n     */\r\n    public MediaCodec(Parcel source) {\r\n        this.codecName = source.readString();\r\n\r\n        Bundle parametersBundle = source.readBundle();\r\n        Set<String> keys = parametersBundle.keySet();\r\n        
Iterator<String> i = keys.iterator();\r\n        while (i.hasNext()) {\r\n            String key = i.next().toString();\r\n            String value = parametersBundle.getString(key);\r\n            this.parameters.put(key, value);\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Describe the kinds of special objects contained in this Parcelable's\r\n     * marshalled representation\r\n     * \r\n     * @return Integer\r\n     */\r\n    public int describeContents() {\r\n        return 0;\r\n    }\r\n\r\n    /**\r\n     * Write parcelable object\r\n     * \r\n     * @param dest The Parcel in which the object should be written\r\n     * @param flags Additional flags about how the object should be written\r\n     */\r\n    public void writeToParcel(Parcel dest, int flags) {\r\n        Bundle parametersBundle = new Bundle();\r\n        Enumeration<String> e = parameters.keys();\r\n        while (e.hasMoreElements()) {\r\n            String key = e.nextElement().toString();\r\n            parametersBundle.putString(key, parameters.get(key));\r\n        }\r\n\r\n        dest.writeString(codecName);\r\n        dest.writeBundle(parametersBundle);\r\n    }\r\n\r\n    /**\r\n     * Parcelable creator\r\n     */\r\n    public static final Parcelable.Creator<MediaCodec> CREATOR = new Parcelable.Creator<MediaCodec>() {\r\n        public MediaCodec createFromParcel(Parcel source) {\r\n            return new MediaCodec(source);\r\n        }\r\n\r\n        public MediaCodec[] newArray(int size) {\r\n            return new MediaCodec[size];\r\n        }\r\n    };\r\n\r\n    /**\r\n     * Get codec name\r\n     * \r\n     * @return Codec name\r\n     */\r\n    public String getCodecName() {\r\n        return codecName;\r\n    }\r\n\r\n    /**\r\n     * Set codec name\r\n     * \r\n     * @param codecName Codec name\r\n     */\r\n    public void setCodecName(String codecName) {\r\n        this.codecName = codecName;\r\n    }\r\n\r\n    /**\r\n     * Get a codec parameter as string\r\n  
   * \r\n     * @param key Parameter key\r\n     * @return Parameter value\r\n     */\r\n    public String getStringParam(String key) {\r\n        if (key != null) {\r\n            return parameters.get(key);\r\n        } else {\r\n            return null;\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Get a codec parameter as integer\r\n     * \r\n     * @param key Parameter key\r\n     * @param defaultValue default value\r\n     * @return Parameter value\r\n     */\r\n    public int getIntParam(String key, int defaultValue) {\r\n        String value = getStringParam(key);\r\n        try {\r\n            return Integer.parseInt(value);\r\n        } catch(Exception e) {\r\n            return defaultValue;\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Set a codec parameter\r\n     * \r\n     * @param key Parameter key\r\n     * @param value Parameter value\r\n     */\r\n    public void setParam(String key, String value) {\r\n        parameters.put(key, value);\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/video/VideoCodec.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage com.orangelabs.rcs.service.api.client.media.video;\r\n\r\nimport com.orangelabs.rcs.service.api.client.media.MediaCodec;\r\n\r\n/**\r\n * Video codec\r\n * \r\n * @author hlxn7157\r\n */\r\npublic class VideoCodec {\r\n\r\n    /**\r\n     * Media codec\r\n     */\r\n    private MediaCodec mediaCodec;\r\n\r\n    /**\r\n     * Payload key\r\n     */\r\n    private static final String PAYLOAD = \"payload\";\r\n\r\n    /**\r\n     * Clock rate key\r\n     */\r\n    private static final String CLOCKRATE = \"clockRate\";\r\n\r\n    /**\r\n     * Codec param key\r\n     */\r\n    private static final String CODECPARAMS = \"codecParams\";\r\n\r\n    /**\r\n     * Frame rate key\r\n     */\r\n    private static final String FRAMERATE = \"framerate\";\r\n\r\n    /**\r\n     * Bit rate key\r\n     */\r\n    private static final String BITRATE = \"bitrate\";\r\n\r\n    /**\r\n     * Codec width key\r\n     */\r\n    private static final String CODECWIDTH = \"codecWidth\";\r\n\r\n    /**\r\n     * Codec height key\r\n     */\r\n    private static final String CODECHEIGHT = \"codecHeight\";\r\n\r\n    /**\r\n     
* Constructor\r\n     * \r\n     * @param codecName Codec name\r\n     * @param clockRate Clock rate\r\n     * @param codecParams Codec parameters\r\n     * @param framerate Frame rate\r\n     * @param bitrate Bit rate\r\n     * @param width Video width\r\n     * @param height Video height\r\n     */\r\n    public VideoCodec(String codecName, int payload, int clockRate, String codecParams, int framerate,\r\n            int bitrate, int width, int height) {\r\n        mediaCodec = new MediaCodec(codecName);\r\n        mediaCodec.setParam(PAYLOAD, \"\" + payload);\r\n        mediaCodec.setParam(CLOCKRATE, \"\" + clockRate);\r\n        mediaCodec.setParam(CODECPARAMS, codecParams);\r\n        mediaCodec.setParam(FRAMERATE, \"\" + framerate);\r\n        mediaCodec.setParam(BITRATE, \"\" + bitrate);\r\n        mediaCodec.setParam(CODECWIDTH, \"\" + width);\r\n        mediaCodec.setParam(CODECHEIGHT, \"\" + height);\r\n    }\r\n\r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param mediaCodec Media codec\r\n     */\r\n    public VideoCodec(MediaCodec mediaCodec) {\r\n        this.mediaCodec = mediaCodec;\r\n    }\r\n\r\n    /**\r\n     * Get media codec\r\n     * \r\n     * @return media codec\r\n     */\r\n    public MediaCodec getMediaCodec() {\r\n        return mediaCodec;\r\n    }\r\n\r\n    /**\r\n     * Get codec name\r\n     * \r\n     * @return Codec name\r\n     */\r\n    public String getCodecName() {\r\n        return mediaCodec.getCodecName();\r\n    }\r\n\r\n    /**\r\n     * Get payload\r\n     * \r\n     * @return payload\r\n     */\r\n    public int getPayload() {\r\n        return mediaCodec.getIntParam(PAYLOAD, 96);\r\n    }\r\n\r\n    /**\r\n     * Get video clock rate\r\n     * \r\n     * @return Video clock rate\r\n     */\r\n    public int getClockRate() {\r\n        return mediaCodec.getIntParam(CLOCKRATE, 90000);\r\n    }\r\n\r\n    /**\r\n     * Get video codec parameters\r\n     * \r\n     * @return Video codec parameters\r\n     */\r\n   
 public String getCodecParams() {\r\n        return mediaCodec.getStringParam(CODECPARAMS);\r\n    }\r\n\r\n    /**\r\n     * Get video frame rate\r\n     * \r\n     * @return Video frame rate\r\n     */\r\n    public int getFramerate() {\r\n        return mediaCodec.getIntParam(FRAMERATE, 15);\r\n    }\r\n\r\n    /**\r\n     * Get video bitrate\r\n     * \r\n     * @return Video bitrate\r\n     */\r\n    public int getBitrate() {\r\n        return mediaCodec.getIntParam(BITRATE, 0);\r\n    }\r\n\r\n    /**\r\n     * Get video width\r\n     * \r\n     * @return Video width\r\n     */\r\n    public int getWidth() {\r\n        return mediaCodec.getIntParam(CODECWIDTH, 176);\r\n    }\r\n\r\n    /**\r\n     * Get video height\r\n     * \r\n     * @return Video height\r\n     */\r\n    public int getHeight() {\r\n        return mediaCodec.getIntParam(CODECHEIGHT, 144);\r\n    }\r\n\r\n    /**\r\n     * Compare codec encodings and resolutions\r\n     * \r\n     * @param codec Codec to compare\r\n     * @return True if codecs are equals\r\n     */\r\n    public boolean compare(VideoCodec codec) {\r\n        if (getCodecName().equalsIgnoreCase(codec.getCodecName()) &&\r\n        \t\tgetWidth() == codec.getWidth() &&\r\n        \t\t\tgetHeight() == codec.getHeight())\r\n            return true;\r\n        return false;\r\n    }\r\n\r\n    /**\r\n     * Check if a codec is in a list\r\n     *\r\n     * @param supportedCodecs list of supported codec\r\n     * @param codec selected codec\r\n     * @return True if the codec is in the list\r\n     */\r\n    public static boolean checkVideoCodec(MediaCodec[] supportedCodecs, VideoCodec codec) {\r\n        for (int i = 0; i < supportedCodecs.length; i++) {\r\n            if (codec.compare(new VideoCodec(supportedCodecs[i])))\r\n                return true;\r\n        }\r\n        return false;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/service/api/client/media/video/VideoSurfaceView.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.service.api.client.media.video;\n\r\nimport android.content.Context;\nimport android.graphics.Bitmap;\nimport android.graphics.Canvas;\nimport android.util.AttributeSet;\nimport android.view.SurfaceHolder;\nimport android.view.SurfaceView;\n\r\n/**\r\n * Video surface view\r\n * \r\n * @author jexa7410\r\n */\r\npublic class VideoSurfaceView extends SurfaceView {    \r\n\t/**\n     * No aspect ratio\n     */\n    public static float NO_RATIO = 0.0f;\n\n    /**\r\n\t * Display area aspect ratio\r\n\t */\r\n\tprivate float aspectRatio = NO_RATIO;\r\n    \r\n\t/**\r\n\t * Surface has been created state\r\n\t */\r\n\tprivate boolean surfaceCreated = false;\n\t\r\n\t/**\r\n\t * Surface holder\r\n\t */\r\n\tprivate SurfaceHolder holder;\r\n\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param context Context\r\n\t */\r\n    public VideoSurfaceView(Context context) {\r\n        super(context);\r\n\n        init();\r\n    }\r\n    \r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param context Context\r\n     * @param attrs Attributes\r\n     */\r\n    public VideoSurfaceView(Context context, AttributeSet attrs) {\r\n  
      super(context, attrs);\r\n        \n        init();\r\n    }\r\n    \r\n    /**\r\n     * Constructor\r\n     * \r\n     * @param context Context\r\n     * @param attrs Attributes\r\n     * @param defStyle Style\r\n     */\r\n    public VideoSurfaceView(Context context, AttributeSet attrs, int defStyle) {\r\n        super(context, attrs, defStyle);\n        \r\n        init();\r\n    }\r\n\r\n    /**\r\n     * Set aspect ration according to desired width and height\r\n     * \r\n     * @param width Width\r\n     * @param height Height\r\n     */\r\n    public void setAspectRatio(int width, int height) {\r\n        setAspectRatio((float)width / (float)height);\r\n    }\r\n    \r\n    /**\r\n     * Set aspect ratio\r\n     * \r\n     * @param ratio Ratio\r\n     */\r\n    public void setAspectRatio(float ratio) {\r\n        if (aspectRatio != ratio) {\r\n            aspectRatio = ratio;\r\n            requestLayout();\r\n            invalidate();\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Ensure aspect ratio\r\n     * \r\n     * @param widthMeasureSpec Width\r\n     * @param heightMeasureSpec Heigh\r\n     */\r\n    protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {\r\n        if (aspectRatio != NO_RATIO) {\r\n            int widthSpecSize =  MeasureSpec.getSize(widthMeasureSpec);\r\n            int heightSpecSize =  MeasureSpec.getSize(heightMeasureSpec);\r\n\r\n            int width = widthSpecSize;\r\n            int height = heightSpecSize;\r\n\r\n            if (width > 0 && height > 0) {\r\n                float defaultRatio = ((float) width) / ((float) height);\r\n                if (defaultRatio < aspectRatio) {\r\n                    // Need to reduce height\r\n                    height = (int) (width / aspectRatio);\r\n                } else if (defaultRatio > aspectRatio) {\r\n                    width = (int) (height * aspectRatio);\r\n                }\r\n                width = Math.min(width, widthSpecSize);\r\n          
      height = Math.min(height, heightSpecSize);\r\n                setMeasuredDimension(width, height);\r\n                return;\r\n            }\r\n        }\r\n        super.onMeasure(widthMeasureSpec, heightMeasureSpec);\r\n    }\r\n    \r\n    /**\r\n\t * Set image from a bitmap\r\n\t * \r\n\t * @param bmp Bitmap\r\n\t */\r\n\tpublic void setImage(Bitmap bmp) {\t\r\n\t\tif (surfaceCreated) {\r\n\t\t\tCanvas canvas = null;\r\n\t\t\ttry {\t\t\t\t\r\n\t\t\t\tsynchronized(holder) {\r\n\t\t\t\t\tcanvas = holder.lockCanvas();\t\t\t\t\t\r\n\t\t\t\t}\t\t\t\t\t\t\t\r\n\t\t\t} finally {\r\n\t\t\t\tif (canvas != null) {\r\n\t\t\t\t\t// First clear screen\r\n\t\t\t\t\tcanvas.drawARGB(255, 0, 0, 0);\r\n\t\t\t\t\t\r\n\t\t\t\t\t// Then draw bmp\r\n\t\t\t\t\tcanvas.drawBitmap(bmp, null, canvas.getClipBounds(), null);\t\t\t\t\t\r\n\t\t\t\t\tholder.unlockCanvasAndPost(canvas);\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\n\t\n\tpublic void clearImage() {\t\n\t\tif (surfaceCreated) {\n\t\t\tCanvas canvas = null;\n\t\t\ttry {\t\t\t\t\n\t\t\t\tsynchronized(holder) {\n\t\t\t\t\tcanvas = holder.lockCanvas();\t\t\t\t\t\n\t\t\t\t}\t\t\t\t\t\t\t\n\t\t\t} finally {\n\t\t\t\tif (canvas != null) {\n\t\t\t\t\t// Clear screen\n\t\t\t\t\tcanvas.drawARGB(255, 0, 0, 0);\n\t\t\t\t\t\n\t\t\t\t\tholder.unlockCanvasAndPost(canvas);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t/**\r\n\t * Init the view\r\n\t */\r\n\tprivate void init() {\n\t\t// Get a surface holder\r\n\t\tholder = this.getHolder();\r\n        holder.addCallback(surfaceCallback);\n\t}\r\n\t\r\n\t/**\r\n\t * Surface holder callback\r\n\t */\r\n\tprivate SurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() {\r\n\t\tpublic void surfaceChanged(SurfaceHolder _holder, int format, int w,int h) {\r\n\t\t}\r\n\r\n\t\tpublic void surfaceCreated(SurfaceHolder _holder) {\r\n\t\t\tsurfaceCreated = true;\n\t\t}\r\n\r\n\t\tpublic void surfaceDestroyed(SurfaceHolder _holder) {\r\n\t\t\tsurfaceCreated = false;\r\n\t\t}\r\n\t};\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/utils/FifoBuffer.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.utils;\r\n\r\nimport java.util.Vector;\n\r\n/**\r\n * FIFO buffer\r\n * \r\n * @author JM. Auffret\r\n */\r\npublic class FifoBuffer {\r\n\t/**\r\n\t * Number of objects in the buffer\r\n\t */\r\n\tprivate int nbObjects = 0;\r\n\r\n\t/**\r\n\t * Buffer of objects\r\n\t */\r\n\tprivate Vector<Object> fifo = new Vector<Object>();\r\n\r\n\t/**\r\n\t * Add an object in the buffer\r\n\t *\r\n\t * @param obj Message\r\n\t */\r\n\tpublic synchronized void addObject(Object obj) {\r\n\t\tfifo.addElement(obj);\r\n\t\tnbObjects++;\r\n\t\tnotifyAll();\r\n\t}\r\n\r\n\t/**\r\n\t * Read an object in the buffer. 
This is a blocking method until an object is read.\r\n\t * \r\n\t * @return Object\r\n\t */\r\n\tpublic synchronized Object getObject() {\r\n\t\tObject obj = null;\r\n\t\tif (nbObjects == 0) {\r\n\t\t\ttry {\r\n\t\t\t\twait();\r\n\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t// Nothing to do\r\n\t\t\t}\r\n\t\t}\r\n\t\tif (nbObjects != 0) {\r\n\t\t\tobj = fifo.elementAt(0);\r\n\t\t\tfifo.removeElementAt(0);\r\n\t\t\tnbObjects--;\r\n\t\t\tnotifyAll();\r\n\t\t}\r\n\t\treturn obj;\r\n\t}\r\n\r\n\t/**\r\n\t * Read an object in the buffer. This is a blocking method until a timeout\r\n\t * occurs or an object is read.\r\n\t * \r\n\t * @param timeout Timeout\r\n\t * @return Message\r\n\t */\r\n\tpublic synchronized Object getObject(int timeout) {\r\n\t\tObject obj = null;\r\n\t\tif (nbObjects == 0) {\r\n\t\t\ttry {\r\n\t\t\t\twait(timeout);\r\n\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t// Nothing to do\r\n\t\t\t}\r\n\t\t}\r\n\t\tif (nbObjects != 0) {\r\n\t\t\tobj = fifo.elementAt(0);\r\n\t\t\tfifo.removeElementAt(0);\r\n\t\t\tnbObjects--;\r\n\t\t\tnotifyAll();\r\n\t\t}\r\n\t\treturn obj;\r\n\t}\r\n\r\n\t/**\r\n\t * Close the buffer\r\n\t */\r\n\tpublic synchronized void close() {\r\n\t\t// Free the semaphore\r\n\t\tthis.notifyAll();\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/utils/NetworkRessourceManager.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.utils;\r\n\r\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.platform.network.SocketServerConnection;\nimport com.orangelabs.rcs.provider.settings.RcsSettings;\nimport java.io.IOException;\n\n/**\n * Network ressource manager\n *\n * @author jexa7410\n */\r\npublic class NetworkRessourceManager {\r\n    /**\n     * Default SIP port base\n     */\n    public static int DEFAULT_LOCAL_SIP_PORT_BASE = RcsSettings.getInstance().getSipListeningPort();\n\n    /**\r\n     * Default RTP port base\r\n     */\r\n    public static int DEFAULT_LOCAL_RTP_PORT_BASE = RcsSettings.getInstance().getDefaultRtpPort();\r\n\r\n    /**\r\n     * Default MSRP port base\r\n     */\r\n    public static int DEFAULT_LOCAL_MSRP_PORT_BASE = RcsSettings.getInstance().getDefaultMsrpPort();\n\n    /**\n     * Generate a default free SIP port number\n     *\n     * @return Local SIP port\n     */\n    public static synchronized int generateLocalSipPort() {\n    \treturn generateLocalUdpPort(DEFAULT_LOCAL_SIP_PORT_BASE);\n    }\n\n    /**\n     * 
Generate a default free RTP port number\n     *\n     * @return Local RTP port\n     */\r\n    public static synchronized int generateLocalRtpPort() {\r\n    \treturn generateLocalUdpPort(DEFAULT_LOCAL_RTP_PORT_BASE);\r\n    }\n\r\n    /**\n     * Generate a default free MSRP port number\n     *\n     * @return Local MSRP port\n     */\r\n    public static synchronized int generateLocalMsrpPort() {\r\n    \treturn generateLocalTcpPort(DEFAULT_LOCAL_MSRP_PORT_BASE);\n    }\n\r\n    /**\n     * Generate a free UDP port number from a specific port base\n     *\n     * @param portBase UDP port base\n     * @return Local UDP port\n     */\r\n    private static int generateLocalUdpPort(int portBase) {\r\n    \tint resp = -1;\r\n\t\tint port = portBase;\r\n\t\twhile((resp == -1) && (port < Integer.MAX_VALUE)) {\r\n\t\t\tif (isLocalUdpPortFree(port)) {\r\n\t\t\t\t// Free UDP port found\r\n\t\t\t\tresp = port;\r\n\t\t\t} else {\n                // +2 needed for RTCP port\n                port += 2;\n\t\t\t}\r\n\t\t}\r\n    \treturn resp;\r\n    }\n\r\n\t/**\n     * Test if the given local UDP port is really free (not used by\n     * other applications)\n     *\n     * @param port Port to check\n     * @return Boolean\n     */\r\n    private static boolean isLocalUdpPortFree(int port) {\r\n    \tboolean res = false;\r\n    \ttry {\r\n    \t\tDatagramConnection conn = NetworkFactory.getFactory().createDatagramConnection();\r\n    \t\tconn.open(port);\r\n            conn.close();\r\n    \t\tres = true;\r\n    \t} catch(IOException e) {\r\n    \t\tres = false;\r\n    \t}\r\n    \treturn res;\r\n    }\n\n    /**\n     * Generate a free TCP port number\n     *\n     * @param portBase TCP port base\n     * @return Local TCP port\n     */\r\n    private static int generateLocalTcpPort(int portBase) {\r\n    \tint resp = -1;\r\n\t\tint port = portBase;\r\n\t\twhile(resp == -1) {\r\n\t\t\tif (isLocalTcpPortFree(port)) {\r\n\t\t\t\t// Free TCP port found\r\n\t\t\t\tresp = 
port;\r\n\t\t\t} else {\r\n\t\t\t\tport++;\r\n\t\t\t}\r\n\t\t}\r\n    \treturn resp;\r\n    }\r\n\r\n\t/**\n     * Test if the given local TCP port is really free (not used by\n     * other applications)\n     *\n     * @param port Port to check\n     * @return Boolean\n     */\r\n    private static boolean isLocalTcpPortFree(int port) {\r\n    \tboolean res = false;\r\n    \ttry {\r\n    \t\tSocketServerConnection conn = NetworkFactory.getFactory().createSocketServerConnection();\r\n    \t\tconn.open(port);\r\n            conn.close();\r\n    \t\tres = true;\r\n    \t} catch(IOException e) {\r\n    \t\tres = false;\r\n    \t}\r\n    \treturn res;\r\n    }\r\n\r\n    /**\n     * Is a valid IP address\n     *\n     * @param ipAddress IP address\n     * @return Boolean\n     */\r\n    public static boolean isValidIpAddress(String ipAddress) {\r\n    \tboolean result = false;\r\n\t\tif ((ipAddress != null) &&\r\n\t\t\t\t(!ipAddress.equals(\"127.0.0.1\")) &&\r\n\t\t\t\t\t(!ipAddress.equals(\"localhost\"))) {\r\n\t\t\tresult = true;\r\n\t\t}\r\n        return result;\r\n    }\n\n    /**\n     * Convert an IP address to its integer representation\n     *\n     * @param addr IP address\n     * @return Integer\n     */\n    public static int ipToInt(String addr) {\n        String[] addrArray = addr.split(\"\\\\.\");\n        int num = 0;\n        for (int i=0; i<addrArray.length; i++) {\n            int power = 3-i;\n            num += ((Integer.parseInt(addrArray[i])%256 * Math.pow(256,power)));\n        }\n        return num;\n    }\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/utils/logger/Appender.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.utils.logger;\r\n\r\n/**\r\n * Appender\r\n * \r\n * @author jexa7410\r\n */\r\npublic abstract class Appender {\r\n\t/**\r\n\t * Constructor\r\n\t */\r\n\tpublic Appender() {\r\n\t}\r\n\r\n\t/**\r\n\t * Print a trace\r\n\t *\r\n\t * @param classname Classname\r\n\t * @param level Trace level\r\n\t * @param trace Trace\r\n\t */\r\n\tpublic abstract void printTrace(String classname, int level, String trace);\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/com/orangelabs/rcs/utils/logger/Logger.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage com.orangelabs.rcs.utils.logger;\r\n\nimport com.orangelabs.rcs.platform.logger.AndroidAppender;\n\r\n/**\r\n * Logger\r\n * \r\n * @author jexa7410\r\n */\r\npublic class Logger {\r\n\t/**\r\n\t * Trace ON\r\n\t */\r\n\tpublic static boolean TRACE_ON = true;\r\n\r\n\t/**\r\n\t * Trace OFF\r\n\t */\r\n\tpublic static boolean TRACE_OFF = false;\r\n\r\n\t/**\r\n\t * DEBUG level\r\n\t */\r\n\tpublic static int DEBUG_LEVEL = 0;\r\n\r\n\t/**\r\n\t * INFO level\r\n\t */\r\n\tpublic static int INFO_LEVEL = 1;\r\n\t\r\n\t/**\r\n\t * WARN level\r\n\t */\r\n\tpublic static int WARN_LEVEL = 2;\r\n\r\n\t/**\r\n\t * ERROR level\r\n\t */\r\n\tpublic static int ERROR_LEVEL = 3;\r\n\r\n\t/**\r\n\t * FATAL level\r\n\t */\r\n\tpublic static int FATAL_LEVEL = 4;\r\n\t\r\n\t/**\r\n\t * Trace flag\r\n\t */\r\n\tpublic static boolean activationFlag = TRACE_ON;\r\n\r\n\t/**\r\n\t * Trace level\r\n\t */\r\n\tpublic static int traceLevel = DEBUG_LEVEL;\r\n\t\r\n\t/**\r\n\t * List of appenders\r\n\t */\r\n\tprivate static Appender[] appenders = new Appender[] { \n\t\tnew AndroidAppender()\n\t};\n\t\r\n\t/**\r\n\t * Classname\r\n\t */\r\n\tprivate String 
classname;\r\n\t\r\n\t/**\r\n\t * Constructor\r\n\t * \r\n\t * @param classname Classname\r\n\t */\r\n\tprivate Logger(String classname) {\r\n\t\tint index = classname.lastIndexOf('.');\r\n\t\tif (index != -1) {\r\n\t\t\tthis.classname = classname.substring(index+1);\r\n\t\t} else {\r\n\t\t\tthis.classname = classname;\t\t\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Is logger activated\r\n\t * \r\n\t * @return boolean\r\n\t */\r\n\tpublic boolean isActivated() {\r\n\t\treturn (activationFlag == TRACE_ON);\r\n\t}\r\n\r\n\t/**\r\n\t * Debug trace\r\n\t * \r\n\t * @param trace Trace\r\n\t */\r\n\tpublic void debug(String trace) {\r\n\t\tprintTrace(trace, DEBUG_LEVEL);\r\n\t}\r\n\r\n\t/**\r\n\t * Info trace\r\n\t * \r\n\t * @param trace Trace\r\n\t */\r\n\tpublic void info(String trace) {\r\n\t\tprintTrace(trace, INFO_LEVEL);\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Warning trace\r\n\t * \r\n\t * @param trace Trace\r\n\t */\r\n\tpublic void warn(String trace) {\r\n\t\tprintTrace(trace, WARN_LEVEL);\t\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * Error trace\r\n\t * \r\n\t * @param trace Trace\r\n\t */\r\n\tpublic void error(String trace) {\r\n\t\tprintTrace(trace, ERROR_LEVEL);\t\r\n\t}\r\n\r\n\t/**\r\n\t * Error trace\r\n\t * \r\n\t * @param trace Trace\r\n\t * @param e Exception\r\n\t */\r\n\tpublic void error(String trace, Throwable e) {\r\n\t\tprintTrace(trace, ERROR_LEVEL);\r\n\t\te.printStackTrace();\r\n\t}\r\n\t\r\n\t/**\r\n\t * Fatal trace\r\n\t * \r\n\t * @param trace Trace\r\n\t */\r\n\tpublic void fatal(String trace) {\r\n\t\tprintTrace(trace, FATAL_LEVEL);\t\r\n\t}\r\n\r\n\t/**\r\n\t * Fatal trace\r\n\t * \r\n\t * @param trace Trace\r\n\t * @param e Exception\r\n\t */\r\n\tpublic void fatal(String trace, Throwable e) {\r\n\t\tprintTrace(trace, FATAL_LEVEL);\t\r\n\t\te.printStackTrace();\r\n\t}\r\n\r\n\t/**\r\n\t * Print a trace\r\n\t * \r\n\t * @param trace Trace\r\n\t * @param level Trace level\r\n\t */\r\n\tprivate void printTrace(String trace, int level) {\r\n\t\tif ((appenders 
!= null) && (level >= traceLevel)) {\r\n\t\t\tfor(int i=0; i < appenders.length; i++) {\r\n\t\t\t\tappenders[i].printTrace(classname, level, trace);\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\t/**\r\n\t * Set the list of appenders\r\n\t * \r\n\t * @param appenders List of appenders\r\n\t */\r\n\tpublic static void setAppenders(Appender[] appenders) {\r\n\t\tLogger.appenders = appenders;\r\n\t}\r\n\r\n\t/**\r\n\t * Create a static instance\r\n\t * \r\n\t * @param classname Classname\r\n\t * @return Instance\r\n\t */\r\n\tpublic static synchronized Logger getLogger(String classname) {\r\n\t\treturn new Logger(classname);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Get the current appenders\r\n\t * \r\n\t * @return Array of appender\r\n\t */\r\n\tpublic static synchronized Appender[] getAppenders() {\r\n\t\treturn appenders;\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/RtpPacket.java",
    "content": "package de.kp.net.rtp;\r\n\r\n/*\r\n * Copyright (C) 2009 The Sipdroid Open Source Project\r\n * Copyright (C) 2005 Luca Veltri - University of Parma - Italy\r\n * \r\n * This file is part of Sipdroid (http://www.sipdroid.org)\r\n * \r\n * Sipdroid is free software; you can redistribute it and/or modify\r\n * it under the terms of the GNU General Public License as published by\r\n * the Free Software Foundation; either version 3 of the License, or\r\n * (at your option) any later version.\r\n * \r\n * This source code is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\n * GNU General Public License for more details.\r\n * \r\n * You should have received a copy of the GNU General Public License\r\n * along with this source code; if not, write to the Free Software\r\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r\n */\r\n\r\n/**\r\n * RtpPacket implements a RTP packet.\r\n */\r\npublic class RtpPacket {\r\n\t/* RTP packet buffer containing both the RTP header and payload */\r\n\tbyte[] packet;\r\n\r\n\t/* RTP packet length */\r\n\tint packet_len;\r\n\r\n\t/* RTP header length */\r\n\t// int header_len;\r\n\t/** Gets the RTP packet */\r\n\tpublic byte[] getPacket() {\r\n\t\treturn packet;\r\n\t}\r\n\r\n\t/** Gets the RTP packet length */\r\n\tpublic int getLength() {\r\n\t\treturn packet_len;\r\n\t}\r\n\r\n\t/** Gets the RTP header length */\r\n\tpublic int getHeaderLength() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn 12 + 4 * getCscrCount();\r\n\t\telse\r\n\t\t\treturn packet_len; // broken packet\r\n\t}\r\n\r\n\t/** Gets the RTP header length */\r\n\tpublic int getPayloadLength() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn packet_len - getHeaderLength();\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Sets the RTP payload length */\r\n\tpublic void 
setPayloadLength(int len) {\r\n\t\t\r\n\t\tpacket_len = getHeaderLength() + len;\r\n\t}\r\n\r\n\t// version (V): 2 bits\r\n\t// padding (P): 1 bit\r\n\t// extension (X): 1 bit\r\n\t// CSRC count (CC): 4 bits\r\n\t// marker (M): 1 bit\r\n\t// payload type (PT): 7 bits\r\n\t// sequence number: 16 bits\r\n\t// timestamp: 32 bits\r\n\t// SSRC: 32 bits\r\n\t// CSRC list: 0 to 15 items, 32 bits each\r\n\r\n\t/** Gets the version (V) */\r\n\tpublic int getVersion() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn (packet[0] >> 6 & 0x03);\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Sets the version (V) */\r\n\tpublic void setVersion(int v) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tpacket[0] = (byte) ((packet[0] & 0x3F) | ((v & 0x03) << 6));\r\n\t}\r\n\r\n\t/** Whether has padding (P) */\r\n\tpublic boolean hasPadding() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getBit(packet[0], 5);\r\n\t\telse\r\n\t\t\treturn false; // broken packet\r\n\t}\r\n\r\n\t/** Set padding (P) */\r\n\tpublic void setPadding(boolean p) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tpacket[0] = setBit(p, packet[0], 5);\r\n\t}\r\n\r\n\t/** Whether has extension (X) */\r\n\tpublic boolean hasExtension() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getBit(packet[0], 4);\r\n\t\telse\r\n\t\t\treturn false; // broken packet\r\n\t}\r\n\r\n\t/** Set extension (X) */\r\n\tpublic void setExtension(boolean x) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tpacket[0] = setBit(x, packet[0], 4);\r\n\t}\r\n\r\n\t/** Gets the CSCR count (CC) */\r\n\tpublic int getCscrCount() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn (packet[0] & 0x0F);\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Whether has marker (M) */\r\n\tpublic boolean hasMarker() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getBit(packet[1], 7);\r\n\t\telse\r\n\t\t\treturn false; // broken packet\r\n\t}\r\n\r\n\t/** Set marker (M) */\r\n\tpublic void setMarker(boolean m) {\r\n\t\tif (packet_len >= 
12)\r\n\t\t\tpacket[1] = setBit(m, packet[1], 7);\r\n\t}\r\n\r\n\t/** Gets the payload type (PT) */\r\n\tpublic int getPayloadType() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn (packet[1] & 0x7F);\r\n\t\telse\r\n\t\t\treturn -1; // broken packet\r\n\t}\r\n\r\n\t/** Sets the payload type (PT) */\r\n\tpublic void setPayloadType(int pt) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tpacket[1] = (byte) ((packet[1] & 0x80) | (pt & 0x7F));\r\n\t}\r\n\r\n\t/** Gets the sequence number */\r\n\tpublic int getSequenceNumber() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getInt(packet, 2, 4);\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Sets the sequence number */\r\n\tpublic void setSequenceNumber(int sn) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tsetInt(sn, packet, 2, 4);\r\n\t}\r\n\r\n\t/** Gets the timestamp */\r\n\tpublic long getTimestamp() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getLong(packet, 4, 8);\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Sets the timestamp */\r\n\tpublic void setTimestamp(long timestamp) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tsetLong(timestamp, packet, 4, 8);\r\n\t}\r\n\r\n\t/** Gets the SSCR */\r\n\tpublic long getSscr() {\r\n\t\tif (packet_len >= 12)\r\n\t\t\treturn getLong(packet, 8, 12);\r\n\t\telse\r\n\t\t\treturn 0; // broken packet\r\n\t}\r\n\r\n\t/** Sets the SSCR */\r\n\tpublic void setSscr(long ssrc) {\r\n\t\tif (packet_len >= 12)\r\n\t\t\tsetLong(ssrc, packet, 8, 12);\r\n\t}\r\n\r\n\t/** Gets the CSCR list */\r\n\tpublic long[] getCscrList() {\r\n\t\tint cc = getCscrCount();\r\n\t\tlong[] cscr = new long[cc];\r\n\t\tfor (int i = 0; i < cc; i++)\r\n\t\t\tcscr[i] = getLong(packet, 12 + 4 * i, 16 + 4 * i);\r\n\t\treturn cscr;\r\n\t}\r\n\r\n\t/** Sets the CSCR list */\r\n\tpublic void setCscrList(long[] cscr) {\r\n\t\tif (packet_len >= 12) {\r\n\t\t\tint cc = cscr.length;\r\n\t\t\tif (cc > 15)\r\n\t\t\t\tcc = 15;\r\n\t\t\tpacket[0] = (byte) (((packet[0] >> 4) << 4) + cc);\r\n\t\t\tcscr = 
new long[cc];\r\n\t\t\tfor (int i = 0; i < cc; i++)\r\n\t\t\t\tsetLong(cscr[i], packet, 12 + 4 * i, 16 + 4 * i);\r\n\t\t\t// header_len=12+4*cc;\r\n\t\t}\r\n\t}\r\n\r\n\t/** Sets the payload */\r\n\tpublic void setPayload(byte[] payload, int len) {\r\n\t\tif (packet_len >= 12) {\r\n\t\t\tint header_len = getHeaderLength();\r\n\t\t\tfor (int i = 0; i < len; i++)\r\n\t\t\t\tpacket[header_len + i] = payload[i];\r\n\t\t\tpacket_len = header_len + len;\r\n\t\t}\r\n\t}\r\n\r\n\t/** Gets the payload */\r\n\tpublic byte[] getPayload() {\r\n\t\tint header_len = getHeaderLength();\r\n\t\tint len = packet_len - header_len;\r\n\t\tbyte[] payload = new byte[len];\r\n\t\tfor (int i = 0; i < len; i++)\r\n\t\t\tpayload[i] = packet[header_len + i];\r\n\t\treturn payload;\r\n\t}\r\n\t\r\n\t/** Creates a new RTP packet */\r\n\tpublic RtpPacket(byte[] buffer, int packet_length) {\r\n\t\tpacket = buffer;\r\n\t\tpacket_len = packet_length;\r\n\t\tif (packet_len < 12)\r\n\t\t\tpacket_len = 12;\r\n\t\tinit(0x0F);\r\n\t}\r\n\r\n\t/** init the RTP packet header (only PT) */\r\n\tpublic void init(int ptype) {\r\n\t\tinit(ptype, RtpRandom.nextLong());\r\n\t}\r\n\r\n\t/** init the RTP packet header (PT and SSCR) */\r\n\tpublic void init(int ptype, long sscr) {\r\n\t\tinit(ptype, RtpRandom.nextInt(), RtpRandom.nextLong(), sscr);\r\n\t}\r\n\r\n\t/** init the RTP packet header (PT, SQN, TimeStamp, SSCR) */\r\n\tpublic void init(int ptype, int seqn, long timestamp, long sscr) {\r\n\t\tsetVersion(2);\r\n\t\tsetPayloadType(ptype);\r\n\t\tsetSequenceNumber(seqn);\r\n\t\tsetTimestamp(timestamp);\r\n\t\tsetSscr(sscr);\r\n\t}\r\n\r\n\t// *********************** Private and Static ***********************\r\n\r\n\t/** Gets int value */\r\n\t//private static int getInt(byte b) {\r\n\t//\treturn ((int) b + 256) % 256;\r\n\t//}\r\n\r\n\t/** Gets long value */\r\n\tprivate static long getLong(byte[] data, int begin, int end) {\r\n\t\tlong n = 0;\r\n\t\tfor (; begin < end; begin++) {\r\n\t\t\tn <<= 
8;\r\n\t\t\tn += data[begin] & 0xFF;\r\n\t\t}\r\n\t\treturn n;\r\n\t}\r\n\r\n\t/** Sets long value */\r\n\tprivate static void setLong(long n, byte[] data, int begin, int end) {\r\n\t\tfor (end--; end >= begin; end--) {\r\n\t\t\tdata[end] = (byte) (n % 256);\r\n\t\t\tn >>= 8;\r\n\t\t}\r\n\t}\r\n\r\n\t/** Gets Int value */\r\n\tprivate static int getInt(byte[] data, int begin, int end) {\r\n\t\treturn (int) getLong(data, begin, end);\r\n\t}\r\n\r\n\t/** Sets Int value */\r\n\tprivate static void setInt(int n, byte[] data, int begin, int end) {\r\n\t\tsetLong(n, data, begin, end);\r\n\t}\r\n\r\n\t/** Gets bit value */\r\n\tprivate static boolean getBit(byte b, int bit) {\r\n\t\treturn (b >> bit) == 1;\r\n\t}\r\n\r\n\t/** Sets bit value */\r\n\tprivate static byte setBit(boolean value, byte b, int bit) {\r\n\t\tif (value)\r\n\t\t\treturn (byte) (b | (1 << bit));\r\n\t\telse\r\n\t\t\treturn (byte) ((b | (1 << bit)) ^ (1 << bit));\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/RtpRandom.java",
    "content": "/*\r\n * Copyright (C) 2005 Luca Veltri - University of Parma - Italy\r\n * \r\n * This file is part of MjSip (http://www.mjsip.org)\r\n * \r\n * MjSip is free software; you can redistribute it and/or modify\r\n * it under the terms of the GNU General Public License as published by\r\n * the Free Software Foundation; either version 2 of the License, or\r\n * (at your option) any later version.\r\n * \r\n * MjSip is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\n * GNU General Public License for more details.\r\n * \r\n * You should have received a copy of the GNU General Public License\r\n * along with MjSip; if not, write to the Free Software\r\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r\n * \r\n * Author(s):\r\n * Luca Veltri (luca.veltri@unipr.it)\r\n */\r\n\r\npackage de.kp.net.rtp;\r\n\r\n/**\r\n * Class Random collects some static methods for generating random numbers and\r\n * other stuff.\r\n */\r\npublic class RtpRandom {\r\n\t/** The random seed */\r\n\tstatic final long seed = System.currentTimeMillis();\r\n\t// static final long seed=0;\r\n\r\n\tstatic java.util.Random rand = new java.util.Random(seed);\r\n\r\n\t// static java.util.Random rand=new java.util.Random();\r\n\r\n\t/** Returns a random integer between 0 and n-1 */\r\n\t/*\r\n\t * static public int nextInt(int n) { seed=(seed*37)%987654321; return\r\n\t * (int)(seed%n); }\r\n\t */\r\n\r\n\t/** Returns true or false respectively with probability p/100 and (1-p/100) */\r\n\t/*\r\n\t * static boolean percent(int p) { return integer(100)<p; }\r\n\t */\r\n\r\n\t/** Sets the seed of this random number generator using a single long seed */\r\n\tpublic static void setSeed(long seed) {\r\n\t\trand.setSeed(seed);\r\n\t}\r\n\r\n\t/** Returns a random integer */\r\n\tpublic static int nextInt() {\r\n\t\treturn 
rand.nextInt();\r\n\t}\r\n\r\n\t/** Returns a random integer between 0 and n-1 */\r\n\tpublic static int nextInt(int n) {\r\n\t\treturn Math.abs(rand.nextInt()) % n;\r\n\t}\r\n\r\n\t/** Returns a random long */\r\n\tpublic static long nextLong() {\r\n\t\treturn rand.nextLong();\r\n\t}\r\n\r\n\t/** Returns a random boolean */\r\n\tpublic static boolean nextBoolean() {\r\n\t\treturn rand.nextInt(2) == 1;\r\n\t}\r\n\r\n\t/** Returns a random array of bytes */\r\n\tpublic static byte[] nextBytes(int len) {\r\n\t\tbyte[] buff = new byte[len];\r\n\t\tfor (int i = 0; i < len; i++)\r\n\t\t\tbuff[i] = (byte) nextInt(256);\r\n\t\treturn buff;\r\n\t}\r\n\r\n\t/** Returns a random String */\r\n\tpublic static String nextString(int len) {\r\n\t\tbyte[] buff = new byte[len];\r\n\t\tfor (int i = 0; i < len; i++) {\r\n\t\t\tint n = nextInt(62);\r\n\t\t\tbuff[i] = (byte) ((n < 10) ? 48 + n : ((n < 36) ? 55 + n : 61 + n));\r\n\t\t}\r\n\t\treturn new String(buff);\r\n\t}\r\n\r\n\t/** Returns a random numeric String */\r\n\tpublic static String nextNumString(int len) {\r\n\t\tbyte[] buff = new byte[len];\r\n\t\tfor (int i = 0; i < len; i++)\r\n\t\t\tbuff[i] = (byte) (48 + nextInt(10));\r\n\t\treturn new String(buff);\r\n\t}\r\n\r\n\t/** Returns a random hexadecimal String */\r\n\tpublic static String nextHexString(int len) {\r\n\t\tbyte[] buff = new byte[len];\r\n\t\tfor (int i = 0; i < len; i++) {\r\n\t\t\tint n = nextInt(16);\r\n\t\t\tbuff[i] = (byte) ((n < 10) ? 48 + n : 87 + n);\r\n\t\t}\r\n\t\treturn new String(buff);\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/RtpSender.java",
    "content": "package de.kp.net.rtp;\r\n\r\nimport java.io.IOException;\r\nimport java.util.Vector;\r\n\r\n/**\r\n * This class is responsible for sending\r\n * RTP packets via RTP to a set of registered\r\n * consumers.\r\n * \r\n * @author Stefan Krusche (krusche@dr-kruscheundpartner.de)\r\n *\r\n */\r\npublic class RtpSender {\r\n\r\n\tprivate static RtpSender instance = new RtpSender();\r\n\t\r\n\t/*\r\n\t * This vector holds a set of RTP sockets that\r\n\t * are registered consumers of RTP packets.\r\n\t */\r\n\tprivate Vector<RtpSocket> receivers;\r\n\t\r\n\tprivate RtpSender() {\r\n\t\treceivers = new Vector<RtpSocket>();\r\n\t}\r\n\t\r\n\tpublic int getReceiverCount() {\r\n\t\treturn receivers.size();\r\n\t}\r\n\t\r\n\tpublic static RtpSender getInstance() {\r\n\t\tif (instance == null) instance = new RtpSender();\r\n\t\treturn instance;\r\n\t}\r\n\t\r\n\t/**\r\n\t * Register RTP packet consumer\r\n\t * \r\n\t * @param receiver\r\n\t */\r\n\tpublic void addReceiver(RtpSocket receiver) {\r\n\t\treceivers.add(receiver);\r\n\t}\r\n\t\r\n\t/**\r\n\t * De-register RTP packet consumer\r\n\t * @param receiver\r\n\t */\r\n\tpublic void removeReceiver(RtpSocket receiver) {\r\n\t\treceivers.remove(receiver);\r\n\t}\r\n\t\r\n\t/**\r\n\t * Send RTP packet to all registered RTP\r\n\t * packet consumers.\r\n\t * \r\n\t * @param rtpPacket\r\n\t * @throws IOException\r\n\t */\r\n\tpublic synchronized void send(RtpPacket rtpPacket) throws IOException {\r\n\r\n\t\tfor (RtpSocket receiver:receivers) {\r\n\t\t\treceiver.send(rtpPacket);\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * Send RTP packet to all registered RTP\r\n\t * packet consumers.\r\n\t * \r\n\t * @param rtpPacket\r\n\t * @throws IOException\r\n\t */\r\n\tpublic synchronized void send(byte[] data) throws IOException {\r\n\r\n\t\tfor (RtpSocket receiver:receivers) {\r\n\t\t\treceiver.send(data);\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\t/**\r\n\t * De-register all registered RTP consumers\r\n\t */\r\n\tpublic void clear() 
{\r\n\t\treceivers.clear();\r\n\t}\r\n\t\r\n\tpublic void stop() {\r\n\t\t// TODO\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/RtpSocket.java",
    "content": "package de.kp.net.rtp;\r\n\r\n/*\r\n * Copyright (C) 2009 The Sipdroid Open Source Project\r\n * Copyright (C) 2005 Luca Veltri - University of Parma - Italy\r\n * \r\n * This file is part of Sipdroid (http://www.sipdroid.org)\r\n * \r\n * Sipdroid is free software; you can redistribute it and/or modify\r\n * it under the terms of the GNU General Public License as published by\r\n * the Free Software Foundation; either version 3 of the License, or\r\n * (at your option) any later version.\r\n * \r\n * This source code is distributed in the hope that it will be useful,\r\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\r\n * GNU General Public License for more details.\r\n * \r\n * You should have received a copy of the GNU General Public License\r\n * along with this source code; if not, write to the Free Software\r\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\r\n */\r\n\r\nimport java.net.DatagramSocket;\r\nimport java.net.InetAddress;\r\nimport java.net.DatagramPacket;\r\nimport java.net.SocketException;\r\nimport java.io.IOException;\r\n\r\n\r\n/**\r\n * RtpSocket implements a RTP socket for receiving and sending RTP packets.\r\n * <p>\r\n * RtpSocket is associated to a DatagramSocket that is used to send and/or\r\n * receive RtpPackets.\r\n */\r\npublic class RtpSocket {\r\n\r\n\t/** UDP socket */\r\n\tDatagramSocket socket;\r\n\tDatagramPacket datagram;\r\n\t\r\n\t/** Remote address */\r\n\tInetAddress remoteAddress;\r\n\r\n\t/** Remote port */\r\n\tint remotePort;\r\n\r\n\t/** \r\n\t * An RtpSocket may be suspended from sending or receiving\r\n\t * UDP data packets\r\n\t */\r\n\t\r\n\tboolean suspended = false;\r\n\t\r\n\t/** Creates a new RTP socket (sender and receiver) \r\n\t * @throws SocketException */\r\n\tpublic RtpSocket(InetAddress remoteAddress, int remotePort) throws SocketException {\r\n\t\t\r\n\t\tthis.socket = 
new DatagramSocket();\r\n\t\tthis.socket.connect(remoteAddress, remotePort);\r\n\t\t\r\n\t\tthis.remoteAddress = remoteAddress;\r\n\t\tthis.remotePort    = remotePort;\r\n\t\t\r\n\t\tdatagram = new DatagramPacket(new byte[1],1);\r\n\t\r\n\t}\r\n\r\n\t/** Creates a new RTP socket (sender and receiver) **/\r\n\tpublic RtpSocket(DatagramSocket socket, InetAddress remoteAddress, int remotePort) {\r\n\t\t\r\n\t\tthis.socket = socket;\r\n\t\t\r\n\t\t// initialize receiver address & port\r\n\t\tthis.remoteAddress = remoteAddress;\r\n\t\tthis.remotePort    = remotePort;\r\n\t\t\r\n\t\tdatagram = new DatagramPacket(new byte[1],1);\r\n\t\r\n\t}\r\n\r\n\t/** Returns the RTP DatagramSocket */\r\n\tpublic DatagramSocket getSocket() {\r\n\t\treturn this.socket;\r\n\t}\r\n\r\n\t/** Receives a RTP packet from this socket */\r\n\tpublic void receive(RtpPacket rtpPacket) throws IOException {\r\n\r\n\t\tdatagram.setData(rtpPacket.getPacket());\r\n\t\tdatagram.setLength(rtpPacket.packet.length);\r\n\t\t\r\n\t\tsocket.receive(datagram);\r\n\t\tif (!socket.isConnected())\r\n\t\t\tsocket.connect(datagram.getAddress(), datagram.getPort());\r\n\t\t\r\n\t\trtpPacket.packet_len = datagram.getLength();\r\n\t\r\n\t}\r\n\r\n\t/** Sends a RTP packet from this socket */\r\n\tpublic void send(RtpPacket rtpPacket) throws IOException {\r\n\r\n\t\tif (this.suspended == true) return;\r\n\t\t\r\n\t\tdatagram.setData(rtpPacket.getPacket());\r\n\t\tdatagram.setLength(rtpPacket.getLength());\r\n\t\t\r\n\t\tdatagram.setAddress(remoteAddress);\r\n\t\tdatagram.setPort(remotePort);\r\n\t\t\r\n\t\tsocket.send(datagram);\r\n\t\r\n\t}\r\n\r\n\t/** Sends a RTP packet from this socket */\r\n\tpublic void send(byte[] data) throws IOException {\r\n\r\n\t\tif (this.suspended == true) 
return;\r\n\t\t\r\n\t\tdatagram.setData(data);\r\n\t\tdatagram.setLength(data.length);\r\n\t\t\r\n\t\tdatagram.setAddress(remoteAddress);\r\n\t\tdatagram.setPort(remotePort);\r\n\t\t\r\n\t\tsocket.send(datagram);\r\n\t\r\n\t}\r\n\t\r\n\tpublic void suspend(boolean suspended) {\r\n\t\tthis.suspended = suspended;\r\n\t}\r\n\t\r\n\t/** Closes this socket */\r\n\tpublic void close() { // socket.close();\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/packetizer/AbstractPacketizer.java",
    "content": "package de.kp.net.rtp.packetizer;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\n\r\nimport de.kp.net.rtp.RtpSender;\r\n\r\nabstract public class AbstractPacketizer extends Thread {\r\n\r\n\tprotected InputStream fis;\r\n\tprotected RtpSender rtpSender;\r\n\tprotected boolean running = false;\r\n\r\n\tpublic AbstractPacketizer() {\r\n\t\tsuper();\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(Runnable runnable) {\r\n\t\tsuper(runnable);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(String threadName) {\r\n\t\tsuper(threadName);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(Runnable runnable, String threadName) {\r\n\t\tsuper(runnable, threadName);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(ThreadGroup group, Runnable runnable) {\r\n\t\tsuper(group, runnable);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(ThreadGroup group, String threadName) {\r\n\t\tsuper(group, threadName);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(ThreadGroup group, Runnable runnable, String threadName) {\r\n\t\tsuper(group, runnable, threadName);\r\n\t}\r\n\r\n\tpublic AbstractPacketizer(ThreadGroup group, Runnable runnable, String threadName, long stackSize) {\r\n\t\tsuper(group, runnable, threadName, stackSize);\r\n\t}\r\n\r\n\tpublic void startStreaming() {\r\n\t\trunning = true;\r\n\t\tstart();\r\n\t}\r\n\r\n\tpublic void stopStreaming() {\r\n\t\ttry {\r\n\t\t\tfis.close();\r\n\t\t} catch (IOException e) {\r\n\t\t\t\r\n\t\t}\r\n\t\trunning = false;\r\n\t}\r\n\t\r\n\r\n\r\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/packetizer/H263Packetizer.java",
    "content": "package de.kp.net.rtp.packetizer;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.net.SocketException;\r\n\r\nimport android.os.SystemClock;\r\nimport android.util.Log;\r\nimport de.kp.net.rtp.RtpPacket;\r\nimport de.kp.net.rtp.RtpSender;\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\npublic class H263Packetizer extends AbstractPacketizer implements Runnable {\r\n\r\n\tprivate String TAG = \"H263Sender\";\r\n\r\n\tprivate boolean videoQualityHigh = true;\r\n\t// private int fps;\r\n\t\r\n\tprivate boolean change;\r\n\t\r\n\tpublic H263Packetizer(InputStream fis) throws SocketException {\r\n\t\tthis.fis = fis;\r\n\t\tthis.rtpSender = RtpSender.getInstance(); \r\n\t}\r\n\t\r\n\tpublic void run() {\r\n\t\t\r\n\t\tint frame_size = 1400;\r\n\t\tbyte[] buffer = new byte[frame_size + 14];\r\n\t\tbuffer[12] = 4;\r\n\r\n\t\tRtpPacket rtpPacket = new RtpPacket(buffer, 0);\r\n\t\t\r\n\t\tint seqn = 0;\r\n\t\tint num, number = 0, src, dest, len = 0, head = 0, lasthead = 0, lasthead2 = 0, cnt = 0, stable = 0;\r\n\t\t\r\n\t\tlong now, lasttime = 0;\r\n\t\t\r\n\t\tdouble avgrate = videoQualityHigh ? 
45000 : 24000;\r\n\t\tdouble avglen = avgrate / 20;\r\n\r\n\t\trtpPacket.setPayloadType(RtspConstants.RTP_H263_PAYLOADTYPE);\r\n\r\n\t\t// while (Receiver.listener_video != null && videoValid()) {\r\n\t\twhile (running) {\r\n\r\n\t\t\tnum = -1;\r\n\t\t\ttry {\r\n\t\t\t\tnum = fis.read(buffer, 14 + number, frame_size - number);\r\n\r\n\t\t\t} catch (IOException e) {\r\n\t\t\t\tLog.w(TAG , e.getMessage());\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\r\n\t\t\tif (num < 0) {\r\n\t\t\t\ttry {\r\n\t\t\t\t\tsleep(20);\r\n\t\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t\tcontinue;\r\n\t\t\t}\r\n\t\t\tnumber += num;\r\n\t\t\thead += num;\r\n\r\n\t\t\ttry {\r\n\t\t\t\r\n\t\t\t\tnow = SystemClock.elapsedRealtime();\r\n\t\t\t\t\r\n\t\t\t\tif (lasthead != head + fis.available() && ++stable >= 5 && now - lasttime > 700) {\r\n\t\t\t\t\tif (cnt != 0 && len != 0)\r\n\t\t\t\t\t\tavglen = len / cnt;\r\n\t\t\t\t\t\r\n\t\t\t\t\tif (lasttime != 0) {\r\n\t\t\t\t\t\t// fps = (int) ((double) cnt * 1000 / (now - lasttime));\r\n\t\t\t\t\t\tavgrate = (double) ((head + fis.available()) - lasthead2) * 1000 / (now - lasttime);\r\n\t\t\t\t\t}\r\n\t\t\t\t\t\r\n\t\t\t\t\tlasttime = now;\r\n\t\t\t\t\tlasthead = head + fis.available();\r\n\t\t\t\t\t\r\n\t\t\t\t\tlasthead2 = head;\r\n\t\t\t\t\tlen = cnt = stable = 0;\r\n\t\t\t\t}\r\n\t\t\t\r\n\t\t\t} catch (IOException e1) {\r\n\t\t\t\tLog.w(TAG, e1.getMessage());\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\r\n\t\t\tfor (num = 14; num <= 14 + number - 2; num++)\r\n\t\t\t\tif (buffer[num] == 0 && buffer[num + 1] == 0)\r\n\t\t\t\t\tbreak;\r\n\t\t\t\r\n\t\t\tif (num > 14 + number - 2) {\r\n\t\t\t\tnum = 0;\r\n\t\t\t\trtpPacket.setMarker(false);\r\n\t\t\t} else {\r\n\t\t\t\tnum = 14 + number - num;\r\n\t\t\t\trtpPacket.setMarker(true);\r\n\t\t\t}\r\n\r\n\t\t\trtpPacket.setSequenceNumber(seqn++);\r\n\t\t\trtpPacket.setPayloadLength(number - num + 2);\r\n\t\t\t\r\n\t\t\tif (seqn > 10)\r\n\t\t\t\t\r\n\t\t\t\ttry 
{\r\n\t\t\t\t\t\r\n\t\t\t\t\trtpSender.send(rtpPacket);\r\n\t\t\t\t\tlen += number - num;\r\n\r\n\t\t\t\t} catch (IOException e) {\r\n\t\t\t\t\tLog.w(TAG, \"RTP packet sent failed\");\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\r\n\t\t\tif (num > 0) {\r\n\r\n\t\t\t\tnum -= 2;\r\n\t\t\t\tdest = 14;\r\n\t\t\t\t\r\n\t\t\t\tsrc = 14 + number - num;\r\n\t\t\t\tif (num > 0 && buffer[src] == 0) {\r\n\t\t\t\t\tsrc++;\r\n\t\t\t\t\tnum--;\r\n\t\t\t\t}\r\n\t\t\t\t\r\n\t\t\t\tnumber = num;\r\n\t\t\t\twhile (num-- > 0)\r\n\t\t\t\t\tbuffer[dest++] = buffer[src++];\r\n\t\t\t\t\r\n\t\t\t\tbuffer[12] = 4;\r\n\r\n\t\t\t\tcnt++;\r\n\t\t\t\ttry {\r\n\t\t\t\t\tif (avgrate != 0)\r\n\t\t\t\t\t\tThread.sleep((int) (avglen / avgrate * 1000));\r\n\t\t\t\t} catch (Exception e) {\r\n\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t\trtpPacket.setTimestamp(SystemClock.elapsedRealtime() * 90);\r\n\r\n\t\t\t} else {\r\n\t\t\t\tnumber = 0;\r\n\t\t\t\tbuffer[12] = 0;\r\n\t\t\t}\r\n\t\t\tif (change) {\r\n\t\t\t\tchange = false;\r\n\t\t\t\tlong time = SystemClock.elapsedRealtime();\r\n\r\n\t\t\t\ttry {\r\n\t\t\t\t\twhile (fis.read(buffer, 14, frame_size) > 0 && SystemClock.elapsedRealtime() - time < 3000)\r\n\t\t\t\t\t\t;\r\n\t\t\t\t} catch (Exception e) {\r\n\t\t\t\t}\r\n\t\t\t\tnumber = 0;\r\n\t\t\t\tbuffer[12] = 0;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\trtpSender.stop();\r\n\r\n\t\ttry {\r\n\t\t\twhile (fis.read(buffer, 0, frame_size) > 0)\r\n\t\t\t\t;\r\n\t\t} catch (IOException e) {\r\n\t\t}\r\n\t}\t\r\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/packetizer/H264Fifo.java",
    "content": "/*\n * Copyright (C) 2011-2012 GUIGUI Simon, fyhertz@gmail.com\n * \n * This file is part of Spydroid (http://code.google.com/p/spydroid-ipcamera/)\n * \n * Spydroid is free software; you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation; either version 3 of the License, or\n * (at your option) any later version.\n * \n * This source code is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n * GNU General Public License for more details.\n * \n * You should have received a copy of the GNU General Public License\n * along with this source code; if not, write to the Free Software\n * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA\n */\n\npackage de.kp.net.rtp.packetizer;\n\npublic class H264Fifo {\n\n\tprivate int length = 0, tail = 0, head = 0;\n\tprivate byte[] buffer;\n\t\n\tpublic H264Fifo(int length) {\n\t\tthis.length = length;\n\t\tbuffer = new byte[length];\n\t}\n\t\n\tpublic void write(byte[] buffer, int offset, int length) {\n\t\t\n\t\tif (tail+length<this.length) {\n\t\t\tSystem.arraycopy(buffer, offset, this.buffer, tail, length);\n\t\t\ttail += length;\n\t\t}\n\t\telse {\n\t\t\tint u = this.length-tail;\n\t\t\tSystem.arraycopy(buffer, offset, this.buffer, tail, u);\n\t\t\tSystem.arraycopy(buffer, offset+u, this.buffer, 0, length-u);\n\t\t\ttail = length-u;\n\t\t}\n\n\t}\n\t\n\tpublic int read(byte[] buffer, int offset, int length) {\n\t\t\n\t\tlength = length>available() ? 
available() : length;\n\t\t\n\t\tif (head+length<this.length) {\n\t\t\tSystem.arraycopy(this.buffer, head, buffer, offset, length);\n\t\t\thead += length;\n\t\t}\n\t\telse {\n\t\t\tint u = this.length-head;\n\t\t\tSystem.arraycopy(this.buffer, head, buffer, offset, u);\n\t\t\tSystem.arraycopy(this.buffer, 0, buffer, offset+u, length-u);\n\t\t\thead = length-u;\n\t\t}\n\t\t\n\t\treturn length;\n\t}\n\t\n\tpublic int available() {\n\t\treturn (tail>=head) ? tail-head : this.length-(head-tail) ; \n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/packetizer/H264Packetizer.java",
    "content": "package de.kp.net.rtp.packetizer;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.net.SocketException;\r\n\r\nimport de.kp.net.rtp.RtpPacket;\r\nimport de.kp.net.rtp.RtpSender;\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\nimport android.os.SystemClock;\r\nimport android.util.Log;\r\n\r\npublic class H264Packetizer extends AbstractPacketizer implements Runnable {\r\n\r\n\tprivate final int packetSize = 1400;\r\n\t\r\n\tprivate long oldtime = SystemClock.elapsedRealtime(), delay = 20;\r\n\tprivate long latency, oldlat = oldtime;\r\n\t\r\n\tprivate int available = 0, oldavailable = 0, nalUnitLength = 0, numberNalUnit = 0, len = 0;\r\n\tprivate H264Fifo fifo = new H264Fifo(500000);\r\n\r\n\tprotected InputStream fis = null;\r\n\r\n\tprotected byte[] buffer = new byte[16384 * 2];\r\n\r\n\tprotected final int rtpHeaderLength = 12; // Rtp header length\r\n\tprivate String TAG = \"H264Packetizer\";\r\n\r\n\tpublic H264Packetizer(InputStream fis) throws SocketException {\r\n\t\tthis.fis = fis;\r\n\t\tthis.rtpSender = RtpSender.getInstance();\r\n\t}\r\n\r\n\tpublic void run() {\r\n\r\n\t\tint seqn = 0;\r\n\t\tbyte[] buffer = new byte[16384*2];\r\n\r\n\t\tRtpPacket rtpPacket = new RtpPacket(buffer, 0);\r\n\t\trtpPacket.setPayloadType(RtspConstants.RTP_H264_PAYLOADTYPE);\r\n\r\n\t\t// skip the mpeg4 header\r\n\r\n\t\ttry {\r\n\r\n\t\t\t// skip all atoms preceding mdat atom\r\n\t\t\tskipMDAT();\r\n\r\n\t\t\t// some phones do not set length correctly when stream is not\r\n\t\t\t// seekable, still we need to skip the header\r\n\t\t\tif (len <= 0) {\r\n\t\t\t\twhile (true) {\r\n\t\t\t\t\twhile (fis.read() != 'm')\r\n\t\t\t\t\t\t;\r\n\t\t\t\t\tfis.read(buffer, rtpHeaderLength, 3);\r\n\t\t\t\t\tif (buffer[rtpHeaderLength] == 'd' && buffer[rtpHeaderLength + 1] == 'a' && buffer[rtpHeaderLength + 2] == 't')\r\n\t\t\t\t\t\tbreak;\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\tlen = 0;\r\n\r\n\t\t} catch (IOException e) {\r\n\t\t\tLog.w(TAG , 
e.getMessage());\r\n\t\t\treturn;\r\n\t\t}\r\n\r\n\t\twhile (running) {\r\n\t\t\t\r\n\t\t\t/* If there are NAL units in the FIFO ready to be sent, we send one */\r\n\t\t\t// send();\r\n\r\n\t\t\t/*\r\n\t\t\t * Read a NAL unit in the FIFO and send it If it is too big, we\r\n\t\t\t * split it in FU-A units (RFC 3984)\r\n\t\t\t */\r\n\t\t\tint sum = 1, len = 0, nalUnitLength;\r\n\r\n\t\t\tif (numberNalUnit != 0) {\r\n\r\n\t\t\t\t/* Read nal unit length (4 bytes) and nal unit header (1 byte) */\r\n\t\t\t\tlen = fifo.read(buffer, rtpHeaderLength, 5);\r\n\t\t\t\tnalUnitLength = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256\r\n\t\t\t\t\t\t+ (buffer[rtpHeaderLength + 1] & 0xFF) * 65536;\r\n\r\n//\t\t\t\t Log.d(TAG ,\"send- NAL unit length: \" + nalUnitLength);\r\n\r\n\t\t\t\t// rsock.updateTimestamp(SystemClock.elapsedRealtime() * 90);\r\n\t\t\t\trtpPacket.setTimestamp(SystemClock.elapsedRealtime() * 90);\r\n\r\n\t\t\t\t/* Small nal unit => Single nal unit */\r\n\t\t\t\tif (nalUnitLength <= packetSize - rtpHeaderLength - 2) {\r\n\r\n\t\t\t\t\tbuffer[rtpHeaderLength] = buffer[rtpHeaderLength + 4];\r\n\t\t\t\t\tlen = fifo.read(buffer, rtpHeaderLength + 1, nalUnitLength - 1);\r\n\r\n\t\t\t\t\trtpPacket.setMarker(true);\r\n\r\n\t\t\t\t\ttry {\r\n\r\n\t\t\t\t\t\trtpPacket.setSequenceNumber(seqn++);\r\n\t\t\t\t\t\trtpPacket.setPayloadLength(nalUnitLength);\r\n\t\t\t\t\t\r\n\t\t\t\t\t\trtpSender.send(rtpPacket);\r\n\r\n\t\t\t\t\t} catch (IOException e) {\r\n\t\t\t\t\t\t// TODO Auto-generated catch block\r\n\t\t\t\t\t\te.printStackTrace();\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t}\r\n\r\n\t\t\t\t/* Large nal unit => Split nal unit */\r\n\t\t\t\telse {\r\n\r\n\t\t\t\t\t/* Set FU-A indicator */\r\n\t\t\t\t\tbuffer[rtpHeaderLength] = 28;\r\n\t\t\t\t\tbuffer[rtpHeaderLength] += (buffer[rtpHeaderLength + 4] & 0x60) & 0xFF; // FU indicator\r\n\t\t\t\t\t// NRI\r\n\t\t\t\t\t// buffer[rtphl] += 0x80;\r\n\r\n\t\t\t\t\t/* Set FU-A header 
*/\r\n\t\t\t\t\tbuffer[rtpHeaderLength + 1] = (byte) (buffer[rtpHeaderLength + 4] & 0x1F); // FU header\r\n\t\t\t\t\t// type\r\n\t\t\t\t\tbuffer[rtpHeaderLength + 1] += 0x80; // Start bit\r\n\r\n\t\t\t\t\twhile (sum < nalUnitLength) {\r\n\r\n\t\t\t\t\t\tif (!running)\r\n\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tlen = fifo.read(buffer, \r\n\t\t\t\t\t\t\t\trtpHeaderLength + 2, \r\n\t\t\t\t\t\t\t\tnalUnitLength - sum > packetSize - rtpHeaderLength - 2 ? packetSize - rtpHeaderLength - 2 : nalUnitLength - sum);\r\n\t\t\t\t\t\tsum += len;\r\n\t\t\t\t\t\tif (len < 0)\r\n\t\t\t\t\t\t\tbreak;\r\n\r\n\t\t\t\t\t\t/* Last packet before next NAL */\r\n\t\t\t\t\t\tif (sum >= nalUnitLength) {\r\n\t\t\t\t\t\t\t// End bit on\r\n\t\t\t\t\t\t\tbuffer[rtpHeaderLength + 1] += 0x40;\r\n\r\n\t\t\t\t\t\t\t// rsock.markNextPacket();\r\n\t\t\t\t\t\t\trtpPacket.setMarker(true);\r\n\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\ttry {\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t// rsock.send(len + rtpHeaderLength + 2);\r\n\t\t\t\t\t\t\trtpPacket.setSequenceNumber(seqn++);\r\n\t\t\t\t\t\t\trtpPacket.setPayloadLength(len + 2);\r\n\r\n\t\t\t\t\t\t\trtpSender.send(rtpPacket);\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t} catch (IOException e) {\r\n\t\t\t\t\t\t\t// TODO Auto-generated catch block\r\n\t\t\t\t\t\t\te.printStackTrace();\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t/* Switch start bit */\r\n\t\t\t\t\t\tbuffer[rtpHeaderLength + 1] = (byte) (buffer[rtpHeaderLength + 1] & 0x7F);\r\n\r\n//\t\t\t\t\t\tLog.d(TAG,\"send--- FU-A unit, end:\"+(boolean)(sum >= nalUnitLength));\r\n\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t}\r\n\r\n\t\t\t\tnumberNalUnit--;\r\n\r\n//\t\t\t\tLog.d(TAG,\"NAL UNIT SENT> \" + numberNalUnit);\r\n\t\t\t}\r\n\r\n\t\t\t/*\r\n\t\t\t * If the camera has delivered new NAL units we copy them in the\r\n\t\t\t * FIFO Then, the delay between two send call is latency/nbNalu\r\n\t\t\t * with: latency: how long it took to the camera to output new data\r\n\t\t\t * nbNalu: number of NAL units in the FIFO\r\n\t\t\t 
*/\r\n\t\t\tfillFifo();\r\n\r\n\t\t\ttry {\r\n\t\t\t\tThread.sleep(delay);\r\n\r\n\t\t\t} catch (InterruptedException e) {\r\n\t\t\t\treturn;\r\n\t\t\t}\r\n\r\n\t\t}\r\n\r\n\t}\r\n\r\n\t// skip all atoms preceeding mdat atom\r\n\t\r\n\tprivate void skipMDAT() throws IOException {\r\n\r\n\t\twhile (true) {\r\n\t\t\t\r\n\t\t\tfis.read(buffer, rtpHeaderLength, 8);\r\n\t\t\tif (buffer[rtpHeaderLength + 4] == 'm' && buffer[rtpHeaderLength + 5] == 'd' && buffer[rtpHeaderLength + 6] == 'a' && buffer[rtpHeaderLength + 7] == 't')\r\n\t\t\t\tbreak;\r\n\t\t\t\r\n\t\t\tlen = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256 + (buffer[rtpHeaderLength + 1] & 0xFF) * 65536;\r\n\t\t\tif (len <= 0)\r\n\t\t\t\tbreak;\r\n\r\n\t\t\tfis.read(buffer, rtpHeaderLength, len - 8);\r\n\t\t\r\n\t\t}\r\n\r\n\t}\r\n\t\r\n\tprivate void fillFifo() {\r\n\r\n\t\ttry {\r\n\r\n\t\t\tavailable = fis.available();\r\n\r\n\t\t\tif (available > oldavailable) {\r\n\r\n\t\t\t\tlong now = SystemClock.elapsedRealtime();\r\n\t\t\t\tlatency = now - oldlat;\r\n\t\t\t\t\r\n\t\t\t\toldlat = now;\r\n\t\t\t\toldavailable = available;\r\n\t\t\t}\r\n\r\n\t\t\tif (numberNalUnit == 0 && available > 4) {\r\n\t\t\t\tnumberNalUnit = nalUnitLength - len == 0 ? 
numberNalUnit : numberNalUnit + 1;\r\n\t\t\t} else\r\n\t\t\t\treturn;\r\n\r\n\t\t\twhile ((available = fis.available()) >= 4) {\r\n\r\n\t\t\t\tfis.read(buffer, rtpHeaderLength, nalUnitLength - len);\r\n\t\t\t\tfifo.write(buffer, rtpHeaderLength, nalUnitLength - len);\r\n\r\n\t\t\t\t/* Read NAL unit and copy it in the fifo */\r\n\t\t\t\tlen = fis.read(buffer, rtpHeaderLength, 4);\r\n\t\t\t\tnalUnitLength = (buffer[rtpHeaderLength + 3] & 0xFF) + (buffer[rtpHeaderLength + 2] & 0xFF) * 256\r\n\t\t\t\t\t\t+ (buffer[rtpHeaderLength + 1] & 0xFF) * 65536;\r\n\t\t\t\t\r\n\t\t\t\tlen = fis.read(buffer, rtpHeaderLength + 4, nalUnitLength);\r\n\t\t\t\tfifo.write(buffer, rtpHeaderLength, len + 4);\r\n\r\n\t\t\t\tif (len == nalUnitLength)\r\n\t\t\t\t\tnumberNalUnit++;\r\n\r\n//\t\t\t\tLog.i(TAG,\"fifo- available: \" + available + \", len: \" + len + \", naluLength: \" + nalUnitLength);\r\n\r\n\t\t\t\tif (fis.available() < 4) {\r\n\r\n\t\t\t\t\tdelay = latency / numberNalUnit;\r\n\t\t\t\t\toldavailable = fis.available();\r\n//\t\t\t\t\tLog.i(TAG,\"fifo- latency: \"+latency+\", nbNalu: \"+numberNalUnit+\", delay: \"+delay+\" avfifo: \"+fifo.available());\r\n\r\n\t\t\t\t}\r\n\r\n\t\t\t}\r\n\r\n\t\t}\r\n\r\n\t\tcatch (IOException e) {\r\n\t\t\treturn;\r\n\t\t}\r\n\r\n\t}\r\n\r\n    // Useful for debug\r\n    protected String printBuffer(int start,int end) {\r\n            String str = \"\";\r\n            for (int i=start;i<end;i++) str+=\",\"+Integer.toHexString(buffer[i]&0xFF);\r\n            return str;\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/recorder/MediaRtpSender.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage de.kp.net.rtp.recorder;\r\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.MediaRegistry;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.Processor;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.RtpException;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.Codec;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.Format;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaInput;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.MediaCaptureStream;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport de.kp.net.rtp.stream.RtpOutputStream;\n\r\n/**\r\n * Media RTP sender\r\n */\r\npublic class MediaRtpSender {\r\n\t/**\r\n\t * Format\r\n\t */\r\n\tprivate Format format;\r\n\r\n    /**\r\n     * Media processor\r\n     */\r\n    private Processor processor = null;\n\n    /**\n     * MediaCaptureStream\n     */\n    MediaCaptureStream inputStream = null;\n\n    /**\n     * RTP output stream\n     */\n    private RtpOutputStream outputStream = null;\n\n    /**\n     * The logger\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\n\n    public MediaRtpSender(Format format) 
{\n    \tthis.format = format;\n    }\n\n    public void prepareSession(MediaInput player) throws RtpException {\n    \t\n    \ttry {\n    \t\t// Create the input stream\n            inputStream = new MediaCaptureStream(format, player);\n    \t\tinputStream.open();\n\t\t\tif (logger.isActivated()) {\n\t\t\t\tlogger.debug(\"Input stream: \" + inputStream.getClass().getName());\n\t\t\t}\n\n            // Create the output stream aka the Renderer\n            outputStream = new RtpOutputStream();\n            // outputStream.open();\n\t\t\t\n            if (logger.isActivated()) {\n\t\t\t\tlogger.debug(\"Output stream: \" + outputStream.getClass().getName());\n\t\t\t}\n\n        \t// Create the codec chain\n        \tCodec[] codecChain = MediaRegistry.generateEncodingCodecChain(format.getCodec());\n\n            // Create the media processor\n    \t\tprocessor = new Processor(inputStream, outputStream, codecChain);\n\n        \tif (logger.isActivated()) {\n        \t\tlogger.debug(\"Broadcast Session has been prepared with success\");\n            }\n        } catch(Exception e) {\n\n        \tif (logger.isActivated()) {\n        \t\tlogger.error(\"Can't prepare resources correctly\", e);\n        \t}\n        \tthrow new RtpException(\"Can't prepare resources\");\n        }\n    }\n\n    /**\n     * Start the RTP session\n     */\r\n    public void startSession() {\r\n    \tif (logger.isActivated()) {\r\n    \t\tlogger.debug(\"Start the session\");\r\n    \t}\r\n\r\n    \t// Start the media processor\r\n\t\tif (processor != null) {\r\n\t\t\tprocessor.startProcessing();\r\n\t\t}\r\n    }\r\n\r\n    /**\r\n     * Stop the RTP session\r\n     */\r\n    public void stopSession() {\r\n    \tif (logger.isActivated()) {\r\n    \t\tlogger.debug(\"Stop the session\");\r\n    \t}\r\n\n    \t// Stop the media processor\r\n\t\tif (processor != null) {\r\n\t\t\tprocessor.stopProcessing();\r\n\t\t}\n\n        if (outputStream != null)\n            outputStream.close();\r\n    
}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/recorder/RtspVideoRecorder.java",
    "content": "/*******************************************************************************\r\n * Software Name : RCS IMS Stack\r\n *\r\n * Copyright (C) 2010 France Telecom S.A.\r\n *\r\n * Licensed under the Apache License, Version 2.0 (the \"License\");\r\n * you may not use this file except in compliance with the License.\r\n * You may obtain a copy of the License at\r\n *\r\n *      http://www.apache.org/licenses/LICENSE-2.0\r\n *\r\n * Unless required by applicable law or agreed to in writing, software\r\n * distributed under the License is distributed on an \"AS IS\" BASIS,\r\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n * See the License for the specific language governing permissions and\r\n * limitations under the License.\r\n ******************************************************************************/\r\n\r\npackage de.kp.net.rtp.recorder;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.MediaRegistry;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.H263Config;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder.NativeH263Encoder;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.encoder.NativeH263EncoderParams;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.encoder.NativeH264Encoder;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.VideoFormat;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaException;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaInput;\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample;\r\nimport com.orangelabs.rcs.service.api.client.media.IMediaEventListener;\r\nimport 
com.orangelabs.rcs.service.api.client.media.IMediaPlayer;\r\nimport com.orangelabs.rcs.service.api.client.media.MediaCodec;\r\nimport com.orangelabs.rcs.service.api.client.media.video.VideoCodec;\r\nimport com.orangelabs.rcs.utils.FifoBuffer;\r\nimport com.orangelabs.rcs.utils.logger.Logger;\r\n\r\nimport android.hardware.Camera;\r\nimport android.os.SystemClock;\r\nimport android.util.Log;\r\n\r\nimport java.util.Vector;\r\n\r\n/**\r\n * Live RTP video player. Supports only H.263 and H264 QCIF formats.\r\n */\r\npublic class RtspVideoRecorder extends IMediaPlayer.Stub implements Camera.PreviewCallback {\r\n\r\n    /**\r\n     * List of supported video codecs\r\n     */\r\n    public static MediaCodec[] supportedMediaCodecs = {\r\n            new VideoCodec(H264Config.CODEC_NAME, H264VideoFormat.PAYLOAD, H264Config.CLOCK_RATE, H264Config.CODEC_PARAMS,\r\n                    H264Config.FRAME_RATE, H264Config.BIT_RATE, H264Config.VIDEO_WIDTH,\r\n                    H264Config.VIDEO_HEIGHT).getMediaCodec(),\r\n            new VideoCodec(H263Config.CODEC_NAME, H263VideoFormat.PAYLOAD, H263Config.CLOCK_RATE, H263Config.CODEC_PARAMS,\r\n                    H263Config.FRAME_RATE, H263Config.BIT_RATE, H263Config.VIDEO_WIDTH,\r\n                    H263Config.VIDEO_HEIGHT).getMediaCodec()\r\n    };\r\n\r\n    /**\r\n     * Selected video codec\r\n     */\r\n    private VideoCodec selectedVideoCodec = null;\r\n\r\n    /**\r\n     * Video format\r\n     */\r\n    private VideoFormat videoFormat;\r\n\r\n    /**\r\n     * Local RTP port\r\n     */\r\n    private int localRtpPort;\r\n\r\n    /**\r\n     * RTP sender session\r\n     */\r\n    private MediaRtpSender rtpMediaSender = null;\r\n\r\n    /**\r\n     * RTP media input\r\n     */\r\n    private MediaRtpInput rtpInput = null;\r\n\r\n    /**\r\n     * Last video frame\r\n     */\r\n    private CameraBuffer frameBuffer = null;\r\n\r\n    /**\r\n     * Is player opened\r\n     */\r\n    private boolean opened = 
false;\r\n\r\n    /**\r\n     * Is player started\r\n     */\r\n    private boolean started = false;\r\n\r\n    /**\r\n     * Video start time\r\n     */\r\n    private long videoStartTime = 0L;\r\n\r\n    /**\r\n     * Media event listeners\r\n     */\r\n    private Vector<IMediaEventListener> listeners = new Vector<IMediaEventListener>();\r\n\r\n    /**\r\n     * The logger\r\n     */\r\n    private Logger logger = Logger.getLogger(this.getClass().getName());\r\n\r\n\tprivate String TAG = \"RtspVideoRecorder\";\r\n\r\n    /**\r\n     * Constructor\r\n     */\r\n    public RtspVideoRecorder() {\r\n    }\r\n\r\n    /**\r\n     * Constructor. Force a video codec.\r\n     *\r\n     * @param codec Video codec\r\n     */\r\n    public RtspVideoRecorder(VideoCodec codec) {\r\n        // Set the media codec\r\n        setMediaCodec(codec.getMediaCodec());\r\n    }\r\n\r\n    /**\r\n     * Constructor. Force a video codec.\r\n     *\r\n     * @param codec Video codec name\r\n     */\r\n    public RtspVideoRecorder(String codec) {\r\n        // Set the media codec\r\n        for (int i = 0; i < supportedMediaCodecs.length ; i++) {\r\n            if (codec.toLowerCase().contains(supportedMediaCodecs[i].getCodecName().toLowerCase())) {\r\n                setMediaCodec(supportedMediaCodecs[i]);\r\n                break;\r\n            }\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Returns the local RTP port\r\n     *\r\n     * @return Port\r\n     */\r\n    public int getLocalRtpPort() {\r\n        return localRtpPort;\r\n    }\r\n\r\n    /**\r\n     * Return the video start time\r\n     *\r\n     * @return Milliseconds\r\n     */\r\n    public long getVideoStartTime() {\r\n        return videoStartTime;\r\n    }\r\n\r\n    /**\r\n     * Is player opened\r\n     *\r\n     * @return Boolean\r\n     */\r\n    public boolean isOpened() {\r\n        return opened;\r\n    }\r\n\r\n    /**\r\n     * Is player started\r\n     *\r\n     * @return Boolean\r\n     */\r\n    public 
boolean isStarted() {\r\n        return started;\r\n    }\r\n\r\n    /**\r\n     * Open the player\r\n     *\r\n     * @param remoteHost Remote host\r\n     * @param remotePort Remote port\r\n     */\r\n    public void open(String remoteHost, int remotePort) {\r\n    \t// This is an interface method, that is no longer\r\n    \t// used with the actual context\r\n    }\r\n    \r\n    public void open() {\r\n\r\n    \tif (opened) {\r\n            // Already opened\r\n            return;\r\n        }\r\n\r\n        // Check video codec\r\n        if (selectedVideoCodec == null) {\r\n        \t\r\n            if (logger.isActivated()) {\r\n                logger.debug(\"Player error: Video Codec not selected\");\r\n            }\r\n\r\n            return;\r\n\r\n        }\r\n\r\n        // Init video encoder\r\n        try {\r\n            if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\r\n                // H264\r\n                NativeH264Encoder.InitEncoder(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight(), selectedVideoCodec.getFramerate());\r\n\r\n            } else if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H263Config.CODEC_NAME)) {\r\n                // Default H263\r\n                NativeH263EncoderParams params = new NativeH263EncoderParams();\r\n            \r\n                params.setEncFrameRate(selectedVideoCodec.getFramerate());\r\n                params.setBitRate(selectedVideoCodec.getBitrate());\r\n\r\n                // set width/height parameters for native encoding, too\r\n                params.setEncHeight(selectedVideoCodec.getHeight());\r\n                params.setEncWidth(selectedVideoCodec.getWidth());\r\n                \r\n                params.setTickPerSrc(params.getTimeIncRes() / selectedVideoCodec.getFramerate());\r\n                params.setIntraPeriod(-1);\r\n                params.setNoFrameSkipped(false);\r\n                \r\n                int result = 
NativeH263Encoder.InitEncoder(params);\r\n                \r\n                if (result != 1) {\r\n                \t\r\n                    if (logger.isActivated()) {\r\n                        logger.debug(\"Player error: Encoder init failed with error code \" + result);\r\n                    }\r\n\r\n                    return;\r\n\r\n                }\r\n            }\r\n        \r\n        } catch (UnsatisfiedLinkError e) {\r\n\r\n        \tif (logger.isActivated()) {\r\n                logger.debug(\"Player error: \" + e.getMessage());\r\n            }\r\n\r\n            return;\r\n\r\n        }\r\n\r\n        // Init the RTP layer\r\n        try {\r\n\r\n        \trtpInput = new MediaRtpInput();\r\n            rtpInput.open();\r\n            \r\n        \trtpMediaSender = new MediaRtpSender(videoFormat);            \r\n            rtpMediaSender.prepareSession(rtpInput);\r\n        \r\n        } catch (Exception e) {\r\n        \t\r\n            if (logger.isActivated()) {\r\n                logger.debug(\"Player error: \" + e.getMessage());\r\n            }\r\n        \t\r\n            return;\r\n        }\r\n\r\n        // Player is opened\r\n        opened = true;\r\n\r\n    }\r\n\r\n    /**\r\n     * Close the player\r\n     */\r\n    public void close() {\r\n        if (!opened) {\r\n            // Already closed\r\n            return;\r\n        }\r\n        // Close the RTP layer\r\n        rtpInput.close();\r\n        rtpMediaSender.stopSession();\r\n\r\n        try {\r\n            // Close the video encoder\r\n            if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\r\n                NativeH264Encoder.DeinitEncoder();\r\n\r\n            } else if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H263Config.CODEC_NAME)) {\r\n                NativeH263Encoder.DeinitEncoder();\r\n            }\r\n        \r\n        } catch (UnsatisfiedLinkError e) {\r\n            if (logger.isActivated()) {\r\n               
 logger.error(\"Can't close correctly the video encoder\", e);\r\n            }\r\n        }\r\n\r\n        // Player is closed\r\n        opened = false;\r\n\r\n    }\r\n\r\n    /**\r\n     * Start the player\r\n     */\r\n    public synchronized void start() {\r\n\t\tLog.d(TAG , \"start\");\r\n   \t\r\n        if ((opened == false) || (started == true)) {\r\n            return;\r\n        }\r\n\r\n        started = true;\r\n\r\n        // Start RTP layer\r\n        rtpMediaSender.startSession();\r\n        \r\n        // Start capture\r\n        captureThread.start();\r\n\r\n        // Player is started\r\n        videoStartTime = SystemClock.uptimeMillis();\r\n\r\n    }\r\n\r\n    /**\r\n     * Stop the player\r\n     */\r\n    public void stop() {\r\n        \r\n    \tif ((opened == false) || (started == false)) { \r\n            return;\r\n        }\r\n\r\n        // Stop capture\r\n        try {\r\n            captureThread.interrupt();\r\n\r\n        } catch (Exception e) {\r\n        }\r\n\r\n        // Player is stopped\r\n        videoStartTime = 0L;\r\n        started = false;\r\n\r\n    }\r\n\r\n    /**\r\n     * Add a media event listener\r\n     *\r\n     * @param listener Media event listener\r\n     */\r\n    public void addListener(IMediaEventListener listener) {\r\n        listeners.addElement(listener);\r\n    }\r\n\r\n    /**\r\n     * Remove all media event listeners\r\n     */\r\n    public void removeAllListeners() {\r\n        listeners.removeAllElements();\r\n    }\r\n\r\n    /**\r\n     * Get supported media codecs\r\n     *\r\n     * @return media Codecs list\r\n     */\r\n    public MediaCodec[] getSupportedMediaCodecs() {\r\n        return supportedMediaCodecs;\r\n    }\r\n\r\n    /**\r\n     * Get media codec\r\n     *\r\n     * @return Media Codec\r\n     */\r\n    public MediaCodec getMediaCodec() {\r\n        if (selectedVideoCodec == null)\r\n            return null;\r\n        else\r\n            return 
selectedVideoCodec.getMediaCodec();\r\n    }\r\n\r\n    /**\r\n     * Set media codec\r\n     *\r\n     * @param mediaCodec Media codec\r\n     */\r\n    public void setMediaCodec(MediaCodec mediaCodec) {\r\n       \r\n    \tif (VideoCodec.checkVideoCodec(supportedMediaCodecs, new VideoCodec(mediaCodec))) {\r\n        \r\n    \t\tselectedVideoCodec = new VideoCodec(mediaCodec);\r\n            videoFormat = (VideoFormat) MediaRegistry.generateFormat(mediaCodec.getCodecName());\r\n\r\n            // Initialize frame buffer\r\n            if (frameBuffer == null) {\r\n                frameBuffer = new CameraBuffer();\r\n            }\r\n\r\n        } else {\r\n\r\n            if (logger.isActivated()) {\r\n                logger.debug(\"Player error: Codec not supported\");\r\n            }\r\n\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Preview frame from the camera\r\n     *\r\n     * @param data Frame\r\n     * @param camera Camera\r\n     */\r\n    public void onPreviewFrame(byte[] data, Camera camera) {\r\n        if (frameBuffer != null)\r\n            frameBuffer.setFrame(data);\r\n    }\r\n\r\n    /**\r\n     * Camera buffer\r\n     */\r\n    private class CameraBuffer {\r\n        /**\r\n         * YUV frame where frame size is always (videoWidth*videoHeight*3)/2\r\n         */\r\n        private byte frame[] = new byte[(selectedVideoCodec.getWidth()\r\n                * selectedVideoCodec.getHeight() * 3) / 2];\r\n\r\n        /**\r\n         * Set the last captured frame\r\n         *\r\n         * @param frame Frame\r\n         */\r\n        public void setFrame(byte[] frame) {\r\n            this.frame = frame;\r\n        }\r\n\r\n        /**\r\n         * Return the last captured frame\r\n         *\r\n         * @return Frame\r\n         */\r\n        public byte[] getFrame() {\r\n            return frame;\r\n        }\r\n    }\r\n\r\n    /**\r\n     * Video capture thread\r\n     */\r\n    private Thread captureThread = new Thread() {\r\n        
/**\r\n         * Timestamp\r\n         */\r\n        private long timeStamp = 0;\r\n\r\n        /**\r\n         * Processing\r\n         */\r\n        public void run() {\r\n//            if (rtpInput == null) {\r\n//                return;\r\n//            }\r\n\r\n            int timeToSleep = 1000 / selectedVideoCodec.getFramerate();\r\n            int timestampInc = 90000 / selectedVideoCodec.getFramerate();\r\n            byte[] frameData;\r\n            byte[] encodedFrame;\r\n            long encoderTs = 0;\r\n            long oldTs = System.currentTimeMillis();\r\n\r\n            while (started) {\r\n                // Set timestamp\r\n                long time = System.currentTimeMillis();\r\n                encoderTs = encoderTs + (time - oldTs);\r\n\r\n                // Get data to encode\r\n                frameData = frameBuffer.getFrame();\r\n                \r\n                // Encode frame\r\n                int encodeResult;\r\n                if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\r\n                    encodedFrame = NativeH264Encoder.EncodeFrame(frameData, encoderTs);\r\n                    encodeResult = NativeH264Encoder.getLastEncodeStatus();\r\n                } else {\r\n                    encodedFrame = NativeH263Encoder.EncodeFrame(frameData, encoderTs);\r\n                    encodeResult = 0;\r\n                }\r\n\r\n        \t\tSystem.out.println(\"RtpVideoRecorder: captureThread: encodeResult == \" + encodeResult);\r\n\r\n        \t\t/*\r\n        \t\t * accept additional status \r\n        \t\t * EAVCEI_MORE_NAL     --  there is more NAL to be retrieved\r\n        \t\t */\r\n                if ((encodeResult == 0 || encodeResult == 6) && encodedFrame.length > 0) {\r\n                \t\r\n                \tif (encodeResult == 6)\r\n                \t\tSystem.out.println(\"RtpVideoRecorder: captureThread: Status == EAVCEI_MORE_NAL\");\r\n                \t\r\n                    // 
Send encoded frame                \t\r\n                    rtpInput.addFrame(encodedFrame, timeStamp += timestampInc);\r\n                }\r\n\r\n                // Sleep between frames if necessary\r\n                long delta = System.currentTimeMillis() - time;\r\n                if (delta < timeToSleep) {\r\n                    try {\r\n                        Thread.sleep((timeToSleep - delta) - (((timeToSleep - delta) * 10) / 100));\r\n                    } catch (InterruptedException e) {\r\n                    }\r\n                }\r\n\r\n                // Update old timestamp\r\n                oldTs = time;\r\n            }\r\n        }\r\n    };\r\n\r\n    /**\r\n     * Media RTP input\r\n     */\r\n    private static class MediaRtpInput implements MediaInput {\r\n        /**\r\n         * Received frames\r\n         */\r\n        private FifoBuffer fifo = null;\r\n\r\n        /**\r\n         * Constructor\r\n         */\r\n        public MediaRtpInput() {\r\n        }\r\n\r\n        /**\r\n         * Add a new video frame\r\n         *\r\n         * @param data Data\r\n         * @param timestamp Timestamp\r\n         */\r\n        public void addFrame(byte[] data, long timestamp) {\r\n            if (fifo != null) {\r\n                fifo.addObject(new MediaSample(data, timestamp));\r\n            }\r\n        }\r\n\r\n        /**\r\n         * Open the player\r\n         */\r\n        public void open() {\r\n            fifo = new FifoBuffer();\r\n        }\r\n\r\n        /**\r\n         * Close the player\r\n         */\r\n        public void close() {\r\n            if (fifo != null) {\r\n                fifo.close();\r\n                fifo = null;\r\n            }\r\n        }\r\n\r\n        /**\r\n         * Read a media sample (blocking method)\r\n         *\r\n         * @return Media sample\r\n         * @throws MediaException\r\n         */\r\n        public MediaSample readSample() throws MediaException {\r\n            try {\r\n       
         if (fifo != null) {\r\n                    return (MediaSample)fifo.getObject();\r\n                } else {\r\n                    throw new MediaException(\"Media input not opened\");\r\n                }\r\n            } catch (Exception e) {\r\n                throw new MediaException(\"Can't read media sample\");\r\n            }\r\n        }\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/stream/RtpOutputStream.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage de.kp.net.rtp.stream;\r\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtcpSession;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.core.RtpPacket;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.stream.ProcessorOutputStream;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Buffer;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.util.Packet;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport de.kp.net.rtp.RtpSender;\n\nimport java.io.IOException;\n\n/**\n * RTP output stream\n *\n * @author Peter Arwanitis (arwanitis@dr-kruscheundpartner.de)\n * @author Stefan Krusche  (krusche@dr-kruscheundpartner.de)\n * \n */\r\npublic class RtpOutputStream implements ProcessorOutputStream {\r\n \n    /**\n     * Sequence number\n     */\n\tprivate int seqNumber = 0;\n\n    /**\n     * RTCP Session\n     */\n    private RtcpSession rtcpSession = null;\n\n    /**\n     * The logger\n     */\r\n\tprivate final Logger logger = Logger.getLogger(this.getClass().getName());\n\n    public RtpOutputStream() {\t\n\n    \t// Used to build SSCR \n    \trtcpSession = new RtcpSession(true, 16000);\n\n    }\n\n    public void open() throws 
Exception {\r\n    }\n\n    public void close() {\r\n\t}\r\n\r\n    /**\n     * Write to the stream without blocking\n     *\n     * @param buffer Input buffer\n     * @throws IOException\n     */\r\n    public void write(Buffer buffer) throws IOException {\n\t\t\n\t\t// Build a RTP packet\n    \tRtpPacket packet = buildRtpPacket(buffer);\n    \tif (packet == null) return;\n\n    \t// Assemble RTP packet\n    \tint size = packet.calcLength();\n    \tpacket.assemble(size);\n\n    \t// Send the RTP packet to the remote destination\n    \ttransmit(packet);\n    \n    }\n\n    /**\n     * Build a RTP packet\n     *\n     * @param buffer Input buffer\n     * @return RTP packet\n     */\n\tprivate RtpPacket buildRtpPacket(Buffer buffer) {\n\n\t\tbyte data[] = (byte[])buffer.getData();\n\t\tif (data == null) return null;\n\n\t\tPacket packet = new Packet();\n\t\tpacket.data = data;\n\t\t\n\t\tpacket.offset = 0;\n\t\tpacket.length = buffer.getLength();\n\n\t\tRtpPacket rtpPacket = new RtpPacket(packet);\n\t\tif ((buffer.getFlags() & 0x800) != 0) {\n\t\t\trtpPacket.marker = 1;\n\t\t\n\t\t} else {\n\t\t\trtpPacket.marker = 0;\n\t\t\n\t\t}\n\n\t\trtpPacket.payloadType = buffer.getFormat().getPayload();\n\t\trtpPacket.seqnum = seqNumber++;\n\t\n\t\trtpPacket.timestamp = buffer.getTimeStamp();\n        rtpPacket.ssrc      = rtcpSession.SSRC;\n\t\t\n        rtpPacket.payloadoffset = buffer.getOffset();\n\t\trtpPacket.payloadlength = buffer.getLength();\n\t\t\n\t\treturn rtpPacket;\n\t\n\t}\n\n    /**\n     * Transmit a RTCP compound packet to the remote destination\n     *\n     * @param packet RTP packet\n     * @throws IOException\n     */\n\tprivate void transmit(Packet packet) {\n\n\t\t// Prepare data to be sent\n\t\tbyte[] data = packet.data;\n\t\t\n\t\tif (packet.offset > 0) {\n\t\t\tSystem.arraycopy(data, packet.offset, data = new byte[packet.length], 0, packet.length);\n\t\t}\n\n\t\t// broadcast data\n    \ttry {\n\t\t\tRtpSender.getInstance().send(data);\n\n    \t} 
catch (IOException e) {\n\n    \t\te.printStackTrace();\n\t\t\tif (logger.isActivated()) {\n\t\t\t\tlogger.error(\"Can't broadcast the RTP packet\", e);\n\t\t\t}\n\t\t\n    \t}\n    \t\n\t}\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtp/viewer/RtpVideoRenderer.java",
    "content": "/*******************************************************************************\n * Software Name : RCS IMS Stack\n *\n * Copyright (C) 2010 France Telecom S.A.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n *      http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n ******************************************************************************/\n\npackage de.kp.net.rtp.viewer;\n\nimport com.orangelabs.rcs.core.ims.protocol.rtp.MediaRegistry;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.MediaRtpReceiver;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.H263Config;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.decoder.NativeH263Decoder;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.decoder.NativeH264Decoder;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.format.video.VideoFormat;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaOutput;\nimport com.orangelabs.rcs.core.ims.protocol.rtp.media.MediaSample;\nimport com.orangelabs.rcs.platform.network.DatagramConnection;\nimport com.orangelabs.rcs.platform.network.NetworkFactory;\nimport com.orangelabs.rcs.service.api.client.media.IMediaEventListener;\nimport com.orangelabs.rcs.service.api.client.media.IMediaRenderer;\nimport 
com.orangelabs.rcs.service.api.client.media.MediaCodec;\nimport com.orangelabs.rcs.service.api.client.media.video.VideoCodec;\nimport com.orangelabs.rcs.service.api.client.media.video.VideoSurfaceView;\nimport com.orangelabs.rcs.utils.logger.Logger;\n\nimport de.kp.net.rtsp.RtspConstants;\nimport de.kp.net.rtsp.client.RtspControl;\nimport de.kp.net.rtsp.client.message.RtspDescriptor;\nimport de.kp.net.rtsp.client.message.RtspMedia;\n\nimport android.graphics.Bitmap;\nimport android.os.RemoteException;\nimport android.os.SystemClock;\n\nimport java.io.IOException;\nimport java.util.List;\nimport java.util.Vector;\n\n/**\n * Video RTP renderer. Supports only H.263 and H264 QCIF formats.\n *\n * @author jexa7410\n */\npublic class RtpVideoRenderer extends IMediaRenderer.Stub {\n\n    /**\n     * List of supported video codecs\n     */\n    public static MediaCodec[] supportedMediaCodecs = {\n            new VideoCodec(H264Config.CODEC_NAME, H264VideoFormat.PAYLOAD, H264Config.CLOCK_RATE, H264Config.CODEC_PARAMS,\n                    H264Config.FRAME_RATE, H264Config.BIT_RATE, H264Config.VIDEO_WIDTH,\n                    H264Config.VIDEO_HEIGHT).getMediaCodec(),\n            new VideoCodec(H263Config.CODEC_NAME, H263VideoFormat.PAYLOAD, H263Config.CLOCK_RATE, H263Config.CODEC_PARAMS,\n                    H263Config.FRAME_RATE, H263Config.BIT_RATE, H263Config.VIDEO_WIDTH,\n                    H263Config.VIDEO_HEIGHT).getMediaCodec()\n    };\n\n    /**\n     * Selected video codec\n     */\n    private VideoCodec selectedVideoCodec = null;\n\n    /**\n     * Video format\n     */\n    private VideoFormat videoFormat;\n\n    /**\n     * Local RTP port\n     */\n    private int localRtpPort;\n\n    /**\n     * RTP receiver session\n     */\n    private MediaRtpReceiver rtpReceiver = null;\n\n    /**\n     * RTP media output\n     */\n    private MediaRtpOutput rtpOutput = null;\n\n    /**\n     * Is player opened\n     */\n    private boolean opened = false;\n\n    /**\n   
  * Is player started\n     */\n    private boolean started = false;\n\n    /**\n     * Video start time\n     */\n    private long videoStartTime = 0L;\n\n    /**\n     * Video surface\n     */\n    private VideoSurfaceView surface = null;\n\n    /**\n     * Media event listeners\n     */\n    private Vector<IMediaEventListener> listeners = new Vector<IMediaEventListener>();\n\n    /**\n     * The logger\n     */\n    private Logger logger = Logger.getLogger(this.getClass().getName());\n\n    /**\n     * Temporary connection to reserve the port\n     */\n    private DatagramConnection temporaryConnection = null;\n\n    /**\n     * RTSP Control\n     */\n    private RtspControl rtspControl;\n    \n    /**\n     * Constructor Force a RTSP Server Uri\n     * @throws Exception \n     */\n    \n    public RtpVideoRenderer(String uri) throws Exception {\n        \n        /*\n         * The RtspControl opens a connection to an RtspServer, that\n         * is determined by the URI provided.\n         */\n        rtspControl = new RtspControl(uri);    \n        \n        /*\n         * wait unit the rtspControl has achieved status READY; in this \n         * state, an SDP file is present and is ready to get evaluated\n         */\n        while (rtspControl.getState() != RtspConstants.READY) {\n        \t; // blocking\n        }\n\n        /* \n         * Set the local RTP port: this is the (socket)\n         * port, the RtspVideoRenderer is listening to\n         * (UDP) RTP packets.\n         */\n        \n    \t// localRtpPort = NetworkRessourceManager.generateLocalRtpPort();\n    \tlocalRtpPort = rtspControl.getClientPort();\n        reservePort(localRtpPort);\n\n        /*\n         * The media resources associated with the SDP descriptor are\n         * evaluated and the respective video encoding determined\n         */\n        \n        RtspDescriptor rtspDescriptor = rtspControl.getDescriptor();\n        List<RtspMedia> mediaList = 
rtspDescriptor.getMediaList();\n        \n        if (mediaList.size() == 0) throw new Exception(\"The session description contains no media resource.\");\n        RtspMedia videoResource = null;\n        \n        for (RtspMedia mediaItem:mediaList) {\n        \t\n        \tif (mediaItem.getMediaType().equals(RtspConstants.SDP_VIDEO_TYPE)) {\n        \t\tvideoResource = mediaItem;\n        \t\tbreak;\n        \t}\n        \t\n        }\n        \n        if (videoResource == null) throw new Exception(\"The session description contains no video resource.\");\n        \n        String codec = videoResource.getEncoding();\n        if (codec == null) throw new Exception(\"No encoding provided for video resource.\");\n        \n        // Set the media codec\n        for (int i = 0; i < supportedMediaCodecs.length; i++) {\n            if (codec.toLowerCase().contains(supportedMediaCodecs[i].getCodecName().toLowerCase())) {\n                setMediaCodec(supportedMediaCodecs[i]);\n                break;\n            }\n        }\n        \n    }\n    \n    /**\n     * Set the surface to render video\n     *\n     * @param surface Video surface\n     */\n    public void setVideoSurface(VideoSurfaceView surface) {\n        this.surface = surface;\n    }\n\n    /**\n     * Return the video start time\n     *\n     * @return Milliseconds\n     */\n    public long getVideoStartTime() {\n        return videoStartTime;\n    }\n\n    /**\n     * Returns the local RTP port\n     *\n     * @return Port\n     */\n    public int getLocalRtpPort() {\n        return localRtpPort;\n    }\n\n    /**\n     * Reserve a port.\n     *\n     * @param port the port to reserve\n     */\n    private void reservePort(int port) {\n\n    \tif (temporaryConnection != null) return;\n        try {\n            temporaryConnection = NetworkFactory.getFactory().createDatagramConnection();\n            temporaryConnection.open(port);\n\n        } catch (IOException e) {\n            temporaryConnection 
= null;\n        }\n\n    }\n\n    /**\n     * Release the reserved port; this method\n     * is invoked while preparing the RTP layer\n     */\n    private void releasePort() {\n\n    \tif (temporaryConnection == null) return;\n\t\ttry {\n            temporaryConnection.close();\n    \n        } catch (IOException e) {\n            temporaryConnection = null;\n        }\n        \n    }\n\n    /**\n     * Is player opened\n     *\n     * @return Boolean\n     */\n    public boolean isOpened() {\n        return opened;\n    }\n\n    /**\n     * Is player started\n     *\n     * @return Boolean\n     */\n    public boolean isStarted() {\n        return started;\n    }\n\n    /**\n     * Open the renderer\n     */\n    public void open() {\n \n    \tif (opened) {\n            // Already opened\n            return;\n        }\n\n        // Check video codec\n        if (selectedVideoCodec == null) {\n        \t          \t\n\t\t\tif (logger.isActivated()) {\n\t\t\t    logger.debug(\"Player error: Video Codec not selected\");\n\t\t\t}\n\n            return;\n\n        }\n\n        try {\n            // Init the video decoder\n            int result;\n            if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\n                result = NativeH264Decoder.InitDecoder();\n            \n            } else { // default H263\n                result = NativeH263Decoder.InitDecoder(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight());\n            \n            }\n            \n            if (result == 0) {\n            \t           \t\n                if (logger.isActivated()) {\n                    logger.debug(\"Player error: Decoder init failed with error code \" + result);\n                }\n\n                return;\n            }\n        \n        } catch (UnsatisfiedLinkError e) {\n\t\n            if (logger.isActivated()) {\n                logger.debug(\"Player error: \" + e.getMessage());\n            }\n\n            
return;\n        \n        }\n\n        try {\n\n        \t// initialize RTP layer\n            \n        \treleasePort();\n\n            rtpOutput = new MediaRtpOutput();\n            rtpOutput.open();\n            \n            rtpReceiver = new MediaRtpReceiver(localRtpPort);\n            rtpReceiver.prepareSession(rtpOutput, videoFormat);\n\n        } catch (Exception e) {\n        \t\n            if (logger.isActivated()) {\n                logger.debug(\"Player error: \" + e.getMessage());\n            }\n\n            return;\n\n        }\n\n        // Player is opened\n        opened = true;\n\n    }\n\n    /**\n     * Close the renderer\n     */\n    public void close() {\n\n    \tif (opened == false) return;\n\n    \t// Send TEARDOWN request to RTSP Server\n    \trtspControl.stop();\n    \t\n        // Close the RTP layer\n        rtpReceiver.stopSession();\n    \trtpOutput.close();\n\n        // Close the video decoder\n    \tcloseVideoDecoder();\n\n        // Player is closed\n        opened = false;\n\n    }\n\n    public void closeVideoDecoder() {\n\n    \ttry {\n            // Close the video decoder\n            if (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\n                NativeH264Decoder.DeinitDecoder();\n\n            } else { // default H263\n                NativeH263Decoder.DeinitDecoder();\n            }\n        \n        } catch (UnsatisfiedLinkError e) {\n            if (logger.isActivated()) {\n                logger.error(\"Can't close correctly the video decoder\", e);\n            }\n        \n        }\n    \t\n    }\n    /**\n     * Start the RTP layer (i.e listen to the reserved local\n     * port for RTP packets), and send a PLAY request to the\n     * RTSP server \n     */\n    public void start() {\n\n    \tif ((opened == false) || (started == true)) {\n            return;\n        }\n    \t\n        // Start RTP layer\n        rtpReceiver.startSession();\n\n        // Send PLAY request to 
RTSP Server\n        rtspControl.play();\n        \n        /*\n         * wait unit the rtspControl has achieved status PLAYING\n         */\n        while (rtspControl.getState() != RtspConstants.PLAYING) {\n        \t; // blocking\n        }\n        \n        \n        // Renderer is started\n        videoStartTime = SystemClock.uptimeMillis();\n        started = true;\n\n    }\n\n    /**\n     * Stop the renderer\n     */\n    public void stop() {\n\n    \tif (started == false) return;\n\n    \t// Send TEARDOWN request to RTSP Server\n    \trtspControl.stop();\n \n        // Stop RTP layer\n        if (rtpReceiver != null) rtpReceiver.stopSession();\n\n        if (rtpOutput != null) rtpOutput.close();\n\n        // Force black screen\n    \tsurface.clearImage();\n\n        // Close the video decoder\n    \tcloseVideoDecoder();\n\n        // Renderer is stopped\n        started = false;\n        videoStartTime = 0L;\n    \n    }\n\n    /**\n     * Add a media event listener\n     *\n     * @param listener Media event listener\n     */\n    public void addListener(IMediaEventListener listener) {\n        listeners.addElement(listener);\n    }\n\n    /**\n     * Remove all media event listeners\n     */\n    public void removeAllListeners() {\n        listeners.removeAllElements();\n    }\n\n    /**\n     * Get supported media codecs\n     *\n     * @return media Codecs list\n     */\n    public MediaCodec[] getSupportedMediaCodecs() {\n        return supportedMediaCodecs;\n    }\n\n    /**\n     * Get media codec\n     *\n     * @return Media codec\n     */\n    public MediaCodec getMediaCodec() {\n        if (selectedVideoCodec == null)\n            return null;\n        else\n            return selectedVideoCodec.getMediaCodec();\n    }\n\n    /**\n     * Set media codec\n     *\n     * @param mediaCodec Media codec\n     */\n    public void setMediaCodec(MediaCodec mediaCodec) {\n        \n    \tif (VideoCodec.checkVideoCodec(supportedMediaCodecs, new 
VideoCodec(mediaCodec))) {\n            selectedVideoCodec = new VideoCodec(mediaCodec);\n            videoFormat = (VideoFormat) MediaRegistry.generateFormat(mediaCodec.getCodecName());\n        \n    \t} else {\n    \t\t\n            if (logger.isActivated()) {\n                logger.debug(\"Player error: Codec not supported\");\n            }\n        \n    \t}\n    }\n\n    /**\n     * Media RTP output\n     */\n    private class MediaRtpOutput implements MediaOutput {\n        /**\n         * Video frame\n         */\n        private int decodedFrame[];\n\n        /**\n         * Bitmap frame\n         */\n        private Bitmap rgbFrame;\n\n        /**\n         * Constructor\n         */\n        public MediaRtpOutput() {\n            \n        \tdecodedFrame = new int[selectedVideoCodec.getWidth() * selectedVideoCodec.getHeight()];\n            rgbFrame     = Bitmap.createBitmap(selectedVideoCodec.getWidth(), selectedVideoCodec.getHeight(), Bitmap.Config.RGB_565);\n        \n        }\n\n        /**\n         * Open the renderer\n         */\n        public void open() {\n        }\n\n        /**\n         * Close the renderer\n         */\n        public void close() {\n        }\n\n        /**\n         * Write a media sample\n         *\n         * @param sample Sample\n         */\n\t\tpublic void writeSample(MediaSample sample) {\n\t\t\tif (selectedVideoCodec.getCodecName().equalsIgnoreCase(H264Config.CODEC_NAME)) {\n\t\t\t\tif (NativeH264Decoder.DecodeAndConvert(sample.getData(), decodedFrame) == 1) {\n\t\t\t\t\trgbFrame.setPixels(decodedFrame, 0, selectedVideoCodec.getWidth(), 0, 0,\n\t\t\t\t\t\t\tselectedVideoCodec.getWidth(), selectedVideoCodec.getHeight());\n\n\t\t\t\t\tif (surface != null) {\n\t\t\t\t\t\tsurface.setImage(rgbFrame);\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tSystem.out.println(\"MediaRtpOutput.writeSample: cannot decode sample >len:\" + sample.getLength());\n\t\t\t\t}\n\t\t\t} else { // default H263\n\t\t\t\tif 
(NativeH263Decoder.DecodeAndConvert(sample.getData(), decodedFrame, sample.getTimeStamp()) == 1) {\n\t\t\t\t\trgbFrame.setPixels(decodedFrame, 0, selectedVideoCodec.getWidth(), 0, 0,\n\t\t\t\t\t\t\tselectedVideoCodec.getWidth(), selectedVideoCodec.getHeight());\n\t\t\t\t\tif (surface != null) {\n\t\t\t\t\t\tsurface.setImage(rgbFrame);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n    }\n\n\t@Override\n\tpublic void open(String remoteHost, int remotePort) throws RemoteException {\n\t}\n\n}\n\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/RtspConstants.java",
    "content": "package de.kp.net.rtsp;\r\n\r\nimport java.net.InetAddress;\r\nimport java.net.NetworkInterface;\r\nimport java.net.SocketException;\r\nimport java.util.Enumeration;\r\n\r\nimport android.util.Log;\r\n\r\npublic class RtspConstants {\r\n\r\n\t// rtsp states\r\n\tpublic static int INIT \t\t= 0;\r\n\tpublic static int READY \t= 1;\r\n\tpublic static int PLAYING \t= 2;\r\n\tpublic static int UNDEFINED = 3;\r\n\t\r\n\t// rtsp message types\r\n\tpublic static int OPTIONS \t= 3;\r\n\tpublic static int DESCRIBE \t= 4;\r\n\tpublic static int SETUP \t= 5;\r\n\tpublic static int PLAY \t\t= 6;\r\n\tpublic static int PAUSE \t= 7;\r\n\tpublic static int TEARDOWN \t= 8;\r\n\t\r\n\tpublic static String SDP_AUDIO_TYPE = \"audio\";\r\n\tpublic static String SDP_VIDEO_TYPE = \"video\";\r\n\t\r\n\t// the payload type is part of the SDP description\r\n\t// sent back as an answer to a DESCRIBE request.\r\n\t\r\n\t// android actually supports video streaming from\r\n\t// the camera using H.263-1998\r\n\t\r\n\t// TODO: sync with \r\n\t// \t\tcom.orangelabs.rcs.core.ims.protocol.rtp.format.video.H263VideoFormat.PAYLOAD = 97\r\n\t//\t\tcom.orangelabs.rcs.core.ims.protocol.rtp.format.video.H264VideoFormat.PAYLOAD = 96\r\n\tpublic static int RTP_H264_PAYLOADTYPE = 96; // dynamic range\r\n\tpublic static int RTP_H263_PAYLOADTYPE = 97; // dynamic range\r\n\t\r\n\tpublic static String H263_1998 = \"H263-1998/90000\";\r\n\tpublic static String H263_2000 = \"H263-2000/90000\";\r\n\tpublic static String H264 = \"H264/90000\";\r\n\t\r\n\tpublic static enum VideoEncoder {\r\n\t\tH263_ENCODER,\r\n\t\tH264_ENCODER\r\n\t};\r\n\t\r\n\t// TODO: synchronize settings\r\n\t// com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h263.H263Config\r\n\t// com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config\r\n\t\r\n\t// QCIF\r\n//\tpublic static String WIDTH  = \"176\";\r\n//\tpublic static String HEIGHT = \"144\";\r\n\t\r\n\t// QCIF\r\n\tpublic static String WIDTH  = 
\"352\";\r\n\tpublic static String HEIGHT = \"288\";\r\n\t\r\n\tpublic static final int FPS = 15;\r\n\tpublic static final int BITRATE = 128000; // h263-2000 \r\n\t//public static final int BITRATE  = 64000; // for h264\r\n\t\r\n    public static final String SEP  = \" \";\r\n\r\n\t\r\n\t// default client ports for audio and video streaming;\r\n\t// the port is usually provided with an RTSP request\r\n    public static final int CLIENT_AUDIO_PORT = 2000;\r\n    public static final int CLIENT_VIDEO_PORT = 4000;\r\n\r\n//\tpublic static String SERVER_IP = \"spexhd2:8080\";\r\n    public static int SERVER_PORT = 8080;\r\n    public static String SERVER_IP = getLocalIpAddress() + \":\" + SERVER_PORT;\r\n    \t\t\r\n\tpublic static String SERVER_NAME    = \"KuP RTSP Server\";\r\n    public static String SERVER_VERSION = \"0.1\";\r\n    \r\n    public static int PORT_BASE = 3000;\r\n    public static int[] PORTS_RTSP_RTP = {PORT_BASE, (PORT_BASE + 1)};\r\n    \r\n\tpublic static final String DIR_MULTIMEDIA = \"../\";\r\n\t\r\n\t// tags for logging\r\n\tpublic static String SERVER_TAG = \"RtspServer\";\r\n\r\n\t\r\n    public static String getLocalIpAddress() {\r\n    \t// http://www.droidnova.com/get-the-ip-address-of-your-device,304.html\r\n        try {\r\n            for (Enumeration<NetworkInterface> en = NetworkInterface.getNetworkInterfaces(); en.hasMoreElements();) {\r\n                NetworkInterface intf = en.nextElement();\r\n                for (Enumeration<InetAddress> enumIpAddr = intf.getInetAddresses(); enumIpAddr.hasMoreElements();) {\r\n                    InetAddress inetAddress = enumIpAddr.nextElement();\r\n                    if (!inetAddress.isLoopbackAddress()) {\r\n                        return inetAddress.getHostAddress().toString();\r\n                    }\r\n                }\r\n            }\r\n        } catch (SocketException ex) {\r\n            Log.e(\"RtspConstants\", ex.toString());\r\n        }\r\n        return null;\r\n    
}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/RtspClient.java",
    "content": "package de.kp.net.rtsp.client;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.io.IOException;\nimport java.net.SocketException;\nimport java.net.URI;\nimport java.net.URISyntaxException;\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport de.kp.net.rtsp.client.api.RequestListener;\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.api.MessageFactory;\nimport de.kp.net.rtsp.client.api.Request;\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.api.Transport;\nimport de.kp.net.rtsp.client.api.TransportListener;\nimport de.kp.net.rtsp.client.header.RtspHeader;\nimport de.kp.net.rtsp.client.header.SessionHeader;\nimport de.kp.net.rtsp.client.header.TransportHeader;\nimport de.kp.net.rtsp.client.header.TransportHeader.LowerTransport;\nimport de.kp.net.rtsp.client.message.MessageBuffer;\nimport de.kp.net.rtsp.client.message.RtspMessageFactory;\nimport de.kp.net.rtsp.client.request.RtspOptionsRequest;\nimport de.kp.net.rtsp.client.request.RtspRequest;\n\npublic class RtspClient implements TransportListener {\n\t\n\tprivate Transport 
transport;\n\n\tprivate MessageFactory messageFactory;\n\n\tprivate MessageBuffer messageBuffer;\n\n\tprivate volatile int cseq;\n\n\tprivate SessionHeader session;\n\n\t/**\n\t * URI kept from last setup.\n\t */\n\tprivate URI uri;\n\n\tprivate Map<Integer, RtspRequest> outstanding;\n\n\tprivate RequestListener clientListener;\n\n\tpublic RtspClient() {\n\n\t\tcseq = 0;\n\n\t\tmessageFactory = new RtspMessageFactory();\n\t\tmessageBuffer  = new MessageBuffer();\n\n\t\toutstanding = new HashMap<Integer, RtspRequest>();\n\n\t}\n\n\tpublic Transport getTransport() {\n\t\treturn transport;\n\t}\n\n\tpublic void setSession(SessionHeader session) {\n\t\tthis.session = session;\n\t}\n\n\tpublic MessageFactory getMessageFactory() {\n\t\treturn messageFactory;\n\t}\n\n\tpublic URI getURI() {\n\t\treturn uri;\n\t}\n\n\tpublic void options(String uri, URI endpoint) {\n\t\t\n\t\ttry {\n\t\t\t\n\t\t\tRtspOptionsRequest message = (RtspOptionsRequest) messageFactory.outgoingRequest(uri, RtspRequest.Method.OPTIONS, nextCSeq());\n\t\t\t// if (getTransport().isConnected() == false) message.addHeader(new RtspHeader(\"Connection\", \"close\"));\t\t\t\n\t\t\tsend(message, endpoint);\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t}\n\t}\n\n\tpublic void play() {\n\n\t\ttry {\n\t\t\tsend(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.PLAY, nextCSeq(), session));\n\t\t\n\t\t} catch(Exception e) {\t\t\t\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t\n\t\t}\n\t}\n\n\tpublic void pause() {\n\t\t\n\t\ttry {\n\t\t\tsend(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.PAUSE, nextCSeq(), session));\n\t\t\n\t\t} catch(Exception e) {\t\t\t\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t}\n\t}\n\n\tpublic void record() throws IOException {\n\t\tthrow new UnsupportedOperationException(\"Recording is not supported in current version.\");\n\t}\n\n\tpublic 
void setRequestListener(RequestListener listener) {\n\t\tclientListener = listener;\n\t}\n\n\tpublic RequestListener getRequestListener() {\n\t\treturn clientListener;\n\t}\n\n\tpublic void setTransport(Transport transport) {\n\t\tthis.transport = transport;\n\t\ttransport.setTransportListener(this);\n\t}\n\n\tpublic void describe(URI uri, String resource) {\n\n\t\tthis.uri = uri;\n\t\t\n\t\tString finalURI = uri.toString();\t\t\n\t\tif ((resource != null) && (resource.equals(\"*\") == false))\n\t\t\tfinalURI += '/' + resource;\n\t\t\n\t\ttry {\n\t\t\tsend(messageFactory.outgoingRequest(finalURI, RtspRequest.Method.DESCRIBE, nextCSeq(), new RtspHeader(\"Accept\", \"application/sdp\")));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t}\n\t}\n\n\tpublic void setup(URI uri, int localPort) {\n\t\t\n\t\tthis.uri = uri;\n\t\ttry {\n\t\t\t\n\t\t\tString portParam = \"client_port=\" + localPort + \"-\" + (1 + localPort);\n\t\t\tsend(getSetup(uri.toString(), localPort, new TransportHeader(LowerTransport.DEFAULT, \"unicast\", portParam), session));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t}\n\t}\n\n\tpublic void setup(URI uri, int localPort, String resource) {\n\t\t\n\t\tthis.uri = uri;\n\t\ttry {\n\t\t\t\n\t\t\tString portParam = \"client_port=\" + localPort + \"-\" + (1 + localPort);\n\t\t\tString finalURI = uri.toString();\n\t\t\t\n\t\t\tif ((resource != null) && (resource.equals(\"*\") == false))\n\t\t\t\tfinalURI += '/' + resource;\n\t\t\t\n\t\t\tsend(getSetup(finalURI, localPort, new TransportHeader(LowerTransport.DEFAULT, \"unicast\", portParam), session));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t}\n\t}\n\n\tpublic void teardown() {\n\t\t\n\t\tif(session == null)\n\t\t\treturn;\n\t\t\n\t\ttry {\n\t\t\tsend(messageFactory.outgoingRequest(uri.toString(), RtspRequest.Method.TEARDOWN, 
nextCSeq(), session, new RtspHeader(\"Connection\", \"close\")));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tif(clientListener != null) clientListener.onError(this, e);\n\t\t\n\t\t}\n\t}\n\n\tpublic void dataReceived(Transport t, byte[] data, int size) throws Throwable {\n\t\t\n\t\tmessageBuffer.addData(data, size);\n\t\twhile(messageBuffer.getLength() > 0)\n\t\t\ttry\n\t\t\t{\n\t\t\t\tmessageFactory.incomingMessage(messageBuffer);\n\t\t\t\tmessageBuffer.discardData();\n\t\t\t\tMessage message = messageBuffer.getMessage();\n\t\t\t\tif(message instanceof RtspRequest)\n\t\t\t\t\tsend(messageFactory.outgoingResponse(405, \"Method Not Allowed\",\n\t\t\t\t\t\t\tmessage.getCSeq().getValue()));\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tRtspRequest request = null;\n\t\t\t\t\tsynchronized(outstanding)\n\t\t\t\t\t{\n\t\t\t\t\t\trequest = outstanding.remove(message.getCSeq().getValue());\n\t\t\t\t\t}\n\t\t\t\t\tResponse response = (Response) message;\n\t\t\t\t\trequest.handleResponse(this, response);\n\t\t\t\t\tclientListener.onSuccess(this, request, response);\n\t\t\t\t}\n\t\t\t} catch(Exception e)\n\t\t\t{\n\t\t\t\tmessageBuffer.discardData();\n\t\t\t\tif(clientListener != null)\n\t\t\t\t\tclientListener.onError(this, e.getCause());\n\t\t\t}\n\t}\n\n\t@Override\n\tpublic void dataSent(Transport t) throws Throwable\n\t{\n\t}\n\n\t@Override\n\tpublic void error(Transport t, Throwable error) {\n\t\tclientListener.onError(this, error);\n\t}\n\n\t@Override\n\tpublic void error(Transport t, Message message, Throwable error)\n\t{\n\t\tclientListener.onFailure(this, (RtspRequest) message, error);\n\t}\n\n\t@Override\n\tpublic void remoteDisconnection(Transport t) throws Throwable\n\t{\n\t\tsynchronized(outstanding)\n\t\t{\n\t\t\tfor(Map.Entry<Integer, RtspRequest> request : outstanding.entrySet())\n\t\t\t\tclientListener.onFailure(this, request.getValue(),\n\t\t\t\t\t\tnew SocketException(\"Socket has been closed\"));\n\t\t}\n\t}\n\n\tpublic int nextCSeq() {\n\t\treturn 
cseq++;\n\t}\n\n\tpublic void send(Message message) throws Exception {\n\t\tsend(message, uri);\n\t}\n\n\tprivate void send(Message message, URI endpoint) throws Exception\n\t{\n\t\tif(!transport.isConnected())\n\t\t\ttransport.connect(endpoint);\n\n\t\tif(message instanceof RtspRequest)\n\t\t{\n\t\t\tRtspRequest request = (RtspRequest) message;\n\t\t\tsynchronized(outstanding)\n\t\t\t{\n\t\t\t\toutstanding.put(message.getCSeq().getValue(), request);\n\t\t\t}\n\t\t\ttry\n\t\t\t{\n\t\t\t\ttransport.sendMessage(message);\n\t\t\t} catch(IOException e)\n\t\t\t{\n\t\t\t\tclientListener.onFailure(this, request, e);\n\t\t\t}\n\t\t} else\n\t\t\ttransport.sendMessage(message);\n\t}\n\n\tprivate Request getSetup(String uri, int localPort, RtspHeader... headers) throws URISyntaxException {\t\t\n\t\treturn getMessageFactory().outgoingRequest(uri, RtspRequest.Method.SETUP, nextCSeq(),\n\t\t\t\theaders);\n\t}\n\n\t@Override\n\tpublic void connected(Transport t) throws Throwable {\n\t\t// TODO Auto-generated method stub\n\t\t\n\t}\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/RtspControl.java",
    "content": "package de.kp.net.rtsp.client;\r\n\r\nimport java.net.URI;\r\nimport de.kp.net.rtsp.RtspConstants;\r\nimport de.kp.net.rtsp.client.api.RequestListener;\r\nimport de.kp.net.rtsp.client.api.Request;\r\nimport de.kp.net.rtsp.client.api.Response;\r\nimport de.kp.net.rtsp.client.message.RtspDescriptor;\r\nimport de.kp.net.rtsp.client.message.RtspMedia;\r\nimport de.kp.net.rtsp.client.transport.TCPTransport;\r\n\r\npublic class RtspControl implements RequestListener {\r\n\r\n\t// reference to the RTSP client\r\n\tprivate RtspClient client;\r\n\r\n\t// flag to indicate whether there is a connection\r\n\t// established to a remote RTSP server\r\n\tprivate boolean connected = false;\r\n\r\n\t// reference to the RTSP server URI\r\n\tprivate URI uri;\r\n\r\n\tprivate int port;\r\n\r\n\tprivate String resource;\r\n\t\r\n\t// reference to the SDP file returned as a response\r\n\t// to a DESCRIBE request\r\n\tprivate RtspDescriptor rtspDescriptor;\r\n\r\n\tprivate int state;\r\n\r\n\t/**\r\n\t * This constructor is invoked with an uri that\r\n\t * describes the server uri and also a certain\r\n\t * resource\r\n\t */\r\n\r\n\tpublic RtspControl(String uri) {\t\r\n\r\n\t\tint pos = uri.lastIndexOf(\"/\");\r\n\r\n\t\ttry {\r\n\r\n\t\t\tthis.uri      = new URI(uri.substring(0, pos));\r\n\t\t\tthis.resource = uri.substring(pos+1);\r\n\r\n\t\t\t// initialize the RTSP communication\r\n\t\t\tthis.client = new RtspClient();\r\n\t\t\tthis.client.setTransport(new TCPTransport());\r\n\t\t\t\r\n\t\t\tthis.client.setRequestListener(this);\t\t\t\r\n\t\t\tthis.state = RtspConstants.UNDEFINED;\r\n\t\t\t\r\n\t\t\t// the OPTIONS request is used to invoke and\r\n\t\t\t// test the connection to the RTSP server,\r\n\t\t\t// specified with the URI provided\r\n\t\t\t\r\n\t\t\tthis.client.options(\"*\", this.uri);\r\n\r\n\t\t} catch (Exception e) {\r\n\t\t\t\r\n\t\t\tif (this.client != null) {\r\n\t\t\t\tonError(this.client, e);\r\n\t\t\t\t\r\n\t\t\t} else 
{\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\tpublic RtspControl(String uri, String resource) {\r\n\t\t\t\t\r\n\t\ttry {\r\n\r\n\t\t\tthis.uri      = new URI(uri);\r\n\t\t\tthis.resource = resource;\r\n\r\n\t\t\t// initialize the RTSP communication\r\n\t\t\tthis.client = new RtspClient();\r\n\t\t\tthis.client.setTransport(new TCPTransport());\r\n\t\t\t\r\n\t\t\tthis.client.setRequestListener(this);\r\n\t\t\t\r\n\t\t\tthis.state = RtspConstants.UNDEFINED;\r\n\t\t\t\r\n\t\t\t// the OPTIONS request is used to invoke and\r\n\t\t\t// test the connection to the RTSP server,\r\n\t\t\t// specified with the URI provided\r\n\t\t\t\r\n\t\t\tthis.client.options(\"*\", this.uri);\r\n\r\n\t\t} catch (Exception e) {\r\n\t\t\t\r\n\t\t\tif (this.client != null) {\r\n\t\t\t\tonError(this.client, e);\r\n\t\t\t\t\r\n\t\t\t} else {\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\r\n\t}\r\n\t\r\n\tpublic void play() {\r\n\r\n\t\tif ((this.client == null) || (this.connected == false)) return;\r\n\r\n\t\tif (this.state == RtspConstants.READY) {\r\n\t\t\tthis.client.play();\t\t\r\n\t\t}\r\n\t\r\n\t}\r\n\t\r\n\tpublic void pause() {\r\n\r\n\t\tif ((this.client == null) || (this.connected == false)) return;\r\n\r\n\t\tif (this.state == RtspConstants.PLAYING) {\r\n\t\t\tthis.client.pause();\t\t\r\n\t\t}\r\n\r\n\t}\r\n\t\r\n\tpublic void stop() {\r\n\t\t\r\n\t\tif ((this.client == null) || (this.connected == false)) return;\r\n\t\t\r\n\t\t// send TEARDOWN request\r\n\t\tthis.client.teardown();\r\n\t\t\r\n\t}\r\n\r\n\tpublic boolean isConnected() {\r\n\t\treturn this.connected;\r\n\t}\r\n\t\r\n\tpublic int getState() {\r\n\t\treturn this.state;\r\n\t}\r\n\t\r\n\tpublic int getClientPort() {\r\n\t\treturn this.port;\r\n\t}\r\n\t\r\n\tpublic RtspDescriptor getDescriptor() {\r\n\t\treturn this.rtspDescriptor;\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic void onError(RtspClient client, Throwable error) {\r\n\r\n\t\tif 
((this.client != null) && (this.connected == true)) {\r\n\t\t\tthis.client.teardown();\r\n\t\t}\r\n \t\t\r\n\t\tthis.state = RtspConstants.UNDEFINED;\r\n\t\tthis.connected = false;\r\n\t\t\r\n\t\tthis.client = null;\r\n\t\t\r\n\t}\r\n\r\n\t// register SDP file\r\n\tpublic void onDescriptor(RtspClient client, String descriptor) {\r\n\t\tthis.rtspDescriptor = new RtspDescriptor(descriptor);\t\t\r\n\t}\r\n\r\n\tpublic void onFailure(RtspClient client, Request request, Throwable cause) {\r\n\r\n\t\tif ((this.client != null) && (this.connected == true)) {\r\n\t\t\tthis.client.teardown();\r\n\t\t}\r\n \t\t\r\n\t\tthis.state = RtspConstants.UNDEFINED;\r\n\t\tthis.connected = false;\r\n\t\t\r\n\t\tthis.client = null;\r\n\t\t\r\n\t}\r\n\r\n\tpublic void onSuccess(RtspClient client, Request request, Response response) {\r\n\r\n\t\ttry {\r\n\r\n\t\t\tif ((this.client != null) && (response.getStatusCode() == 200)) {\r\n\t\t\t\t\r\n\t\t\t\tRequest.Method method = request.getMethod();\r\n\t\t\t\tif (method == Request.Method.OPTIONS) {\r\n\t\t\t\t\t// the response to an OPTIONS request\r\n\t\t\t\t\tthis.connected = true;\r\n\t\t\t\t\t\r\n\t\t\t\t\t// send DESCRIBE request\r\n\t\t\t\t\tthis.client.describe(this.uri, this.resource);\r\n\t\t\t\t\r\n\t\t\t\t} else if (method == Request.Method.DESCRIBE) {\r\n\t\t\t\t\t\r\n\t\t\t\t\t// set state to INIT\r\n\t\t\t\t\tthis.state = RtspConstants.INIT;\r\n\t\t\t\t\t\r\n\t\t\t\t\t/* \r\n\t\t\t\t\t * onSuccess is called AFTER onDescriptor method;\r\n\t\t\t\t\t * this implies, that a media resource is present\r\n\t\t\t\t\t * with a certain client port specified by the RTSP\r\n\t\t\t\t\t * server\r\n\t\t\t\t\t */\r\n\t\t\t\t\t\r\n\t\t\t\t\tRtspMedia video = this.rtspDescriptor.getFirstVideo();\r\n\t\t\t\t\tif (video != null) {\r\n\t\t\t\t\t\r\n\t\t\t\t\t\tthis.port = Integer.valueOf(video.getTransportPort());\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t// send SETUP request\r\n\t\t\t\t\t\tthis.client.setup(this.uri, this.port, 
this.resource);\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t} else if (method == Request.Method.SETUP) {\r\n\t\t\t\t\t\r\n\t\t\t\t\t// set state to READY\r\n\t\t\t\t\tthis.state = RtspConstants.READY;\r\n\r\n\t\t\t\t} else if (method == Request.Method.PLAY) {\r\n\t\t\t\t\t\r\n\t\t\t\t\t// set state to PLAYING\r\n\t\t\t\t\tthis.state = RtspConstants.PLAYING;\r\n\r\n\t\t\t\t} else if (method == Request.Method.PAUSE) {\r\n\t\t\t\t\t\r\n\t\t\t\t\t// set state to READY\r\n\t\t\t\t\tthis.state = RtspConstants.READY;\r\n\r\n\t\t\t\t} else if (method == Request.Method.TEARDOWN) {\r\n\r\n\t\t\t\t\tthis.connected = false;\r\n\t\t\t\t\t\r\n\t\t\t\t\t// set state to UNDEFINED\r\n\t\t\t\t\tthis.state = RtspConstants.UNDEFINED;\r\n\r\n\t\t\t\t}\r\n\t\t\t\r\n\t\t\t} else {\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t} catch (Exception e) {\r\n\t\t\tonError(this.client, e);\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t}\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/EntityMessage.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.header.RtspContent;\n\npublic interface EntityMessage {\n\t\n\tpublic RtspContent getContent();\n\t\n\tpublic void setContent(RtspContent content);\n\t\n\tpublic Message getMessage();\n\t\n\tpublic byte[] getBytes() throws Exception;\n\t\n\tpublic boolean isEntity();\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/Message.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.header.CSeqHeader;\nimport de.kp.net.rtsp.client.header.RtspHeader;\n\npublic interface Message {\n\t\n\tstatic String RTSP_TOKEN = \"RTSP/\";\n\n\tstatic String RTSP_VERSION = \"1.0\";\n\n\tstatic String RTSP_VERSION_TOKEN = RTSP_TOKEN + RTSP_VERSION;\n\n\t/**\n\t * \n\t * @return the Message line (the first line of the message)\n\t */\n\tpublic String getLine();\n\n\t/**\n\t * Returns a header, if exists\n\t * \n\t * @param name\n\t *          Name of the header to be searched\n\t * @return value of that header\n\t * @throws Exception\n\t */\n\tpublic RtspHeader getHeader(String name) throws Exception;\n\n\t/**\n\t * Convenience method to get CSeq.\n\t * \n\t * @return\n\t */\n\tpublic CSeqHeader getCSeq();\n\n\t/**\n\t * \n\t * @return all headers in the message, except CSeq\n\t */\n\tpublic RtspHeader[] getHeaders();\n\n\t/**\n\t * Adds a new header or replaces if one already exists. 
If header to be added\n\t * is a CSeq, implementation MUST keep reference of this header.\n\t * \n\t * @param header\n\t */\n\tpublic void addHeader(RtspHeader header);\n\n\t/**\n\t * \n\t * @return message as a byte array, ready for transmission.\n\t */\n\tpublic byte[] getBytes() throws Exception;\n\n\t/**\n\t * \n\t * @return Entity part of message, if it exists.\n\t */\n\tpublic EntityMessage getEntityMessage();\n\n\t/**\n\t * \n\t * @param entity\n\t *          adds an entity part to the message.\n\t * @return this, for easier construction.\n\t */\n\tpublic Message setEntityMessage(EntityMessage entity);\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/MessageFactory.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.header.RtspContent;\nimport de.kp.net.rtsp.client.header.RtspHeader;\nimport de.kp.net.rtsp.client.message.MessageBuffer;\n\npublic interface MessageFactory {\n\n\tpublic void incomingMessage(MessageBuffer message) throws Exception;\n\n\tpublic Request outgoingRequest(String uri, Request.Method method, int cseq, RtspHeader... extras) throws URISyntaxException;\n\n\tpublic Request outgoingRequest(RtspContent body, String uri, Request.Method method, int cseq, RtspHeader... extras) throws URISyntaxException;\n\n\tpublic Response outgoingResponse(int code, String message, int cseq, RtspHeader... extras);\n\n\tpublic Response outgoingResponse(RtspContent body, int code, String text, int cseq, RtspHeader... extras);\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/Request.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.RtspClient;\n\npublic interface Request extends Message {\n\t\n\tenum Method {\n\t\tOPTIONS, DESCRIBE, SETUP, PLAY, PAUSE, RECORD, TEARDOWN\n\t};\n\n\tpublic void setLine(Method method, String uri) throws URISyntaxException;\n\t\n\tpublic Method getMethod();\n\t\n\tpublic String getURI();\n\t\n\tpublic void handleResponse(RtspClient client, Response response);\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/RequestListener.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.RtspClient;\n\npublic interface RequestListener {\n\n\tpublic void onDescriptor(RtspClient client, String descriptor);\n\t\n\tpublic void onError(RtspClient client, Throwable error);\n\n\tpublic void onFailure(RtspClient client, Request request, Throwable cause);\n\n\tpublic void onSuccess(RtspClient client, Request request, Response response);\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/Response.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic interface Response extends Message {\n\t\n\tpublic void setLine(int statusCode, String statusPhrase);\n\t\n\tpublic int getStatusCode();\n\t\n\tpublic String getStatusText();\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/Transport.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.io.IOException;\nimport java.net.URI;\n\n/**\n * This interface defines a transport protocol (TCP, UDP) or method (HTTP\n * tunneling). Transport also MUST enqueue a command if a connection is busy at\n * the moment it is issued.\n */\n\npublic interface Transport {\n\t\n\tpublic void connect(URI to) throws IOException;\n\n\tpublic void disconnect();\n\n\tpublic void sendMessage(Message message) throws Exception;\n\n\tpublic void setTransportListener(TransportListener listener);\n\n\tpublic void setUserData(Object data);\n\n\tpublic boolean isConnected();\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/api/TransportListener.java",
    "content": "package de.kp.net.rtsp.client.api;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\n/**\n * Listener for transport events. Implementations of {@link Transport}, when\n * calling a listener method, must catch all errors and submit them to the\n * error() method.\n */\npublic interface TransportListener {\n\t\n\tpublic void connected(Transport t) throws Throwable;\n\n\tpublic void error(Transport t, Throwable error);\n\n\tpublic void error(Transport t, Message message, Throwable error);\n\n\tpublic void remoteDisconnection(Transport t) throws Throwable;\n\n\tpublic void dataReceived(Transport t, byte[] data, int size) throws Throwable;\n\n\tpublic void dataSent(Transport t) throws Throwable;\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/CSeqHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class CSeqHeader extends RtspBaseIntegerHeader {\n\tpublic static final String NAME = \"CSeq\";\n\n\tpublic CSeqHeader() {\n\t\tsuper(NAME);\n\t}\n\t\n\tpublic CSeqHeader(int cseq) {\n\t\tsuper(NAME, cseq);\n\t}\n\t\n\tpublic CSeqHeader(String line) {\n\t\tsuper(line);\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/ContentEncodingHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class ContentEncodingHeader extends RtspBaseStringHeader {\n\t\n\tpublic static final String NAME = \"Content-Encoding\";\n\n\tpublic ContentEncodingHeader() {\n\t\tsuper(NAME);\n\t}\n\n\tpublic ContentEncodingHeader(String header) {\n\t\tsuper(NAME, header);\n\t}\n\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/ContentLengthHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class ContentLengthHeader extends RtspBaseIntegerHeader {\n\n\tpublic static final String NAME = \"Content-Length\";\n\t\n\tpublic ContentLengthHeader() {\n\t\tsuper(NAME);\n\t}\n\t\n\tpublic ContentLengthHeader(int value) {\n\t\tsuper(NAME, value);\n\t}\n\n\tpublic ContentLengthHeader(String header) throws Exception {\n\t\tsuper(NAME, header);\n\t}\n\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/ContentTypeHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class ContentTypeHeader extends RtspBaseStringHeader {\n\t\n\tpublic static final String NAME = \"Content-Type\";\n\n\tpublic ContentTypeHeader() {\n\t\tsuper(NAME);\n\t}\n\n\tpublic ContentTypeHeader(String header) {\n\t\tsuper(NAME, header);\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/RtspBaseIntegerHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class RtspBaseIntegerHeader extends RtspHeader {\n\t\n\tprivate int value;\n\t\n\tpublic RtspBaseIntegerHeader(String name) {\n\t\tsuper(name);\n\n\t\tString text = getRawValue();\n\t\tif(text != null) value = Integer.parseInt(text);\n\t\n\t}\n\t\n\tpublic RtspBaseIntegerHeader(String name, int value) {\n\t\tsuper(name);\n\t\tsetValue(value);\n\t}\n\n\tpublic RtspBaseIntegerHeader(String name, String header) throws Exception {\n\t\tsuper(header);\n\t\t\n\t\tcheckName(name);\n\t\tvalue = Integer.parseInt(getRawValue());\n\t\n\t}\n\n\tpublic final void setValue(int newValue) {\n\t\tvalue = newValue;\n\t\tsetRawValue(String.valueOf(value));\n\t}\n\t\n\tpublic final int getValue() {\n\t\treturn value;\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/RtspBaseStringHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class RtspBaseStringHeader extends RtspHeader {\n\t\n\tpublic RtspBaseStringHeader(String name) {\n\t\tsuper(name);\n\t}\n\n\tpublic RtspBaseStringHeader(String name, String header) {\n\t\tsuper(header);\n\t\t\n\t\ttry {\n\t\t\tcheckName(name);\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tsetName(name);\n\t\t}\n\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/RtspContent.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.api.Message;\n\npublic class RtspContent {\n\t\n\tprivate String type;\n\n\tprivate String encoding;\n\n\tprivate byte[] content;\n\n\tpublic void setDescription(Message message) throws Exception {\n\t\t\n\t\ttype = message.getHeader(ContentTypeHeader.NAME).getRawValue();\n\t\ttry {\n\t\t\tencoding = message.getHeader(ContentEncodingHeader.NAME).getRawValue();\n\t\t\n\t\t} catch(Exception e) {\n\t\t}\n\t\t\n\t}\n\n\tpublic String getType() {\n\t\treturn type;\n\t}\n\n\tpublic void setType(String type) {\n\t\tthis.type = type;\n\t}\n\n\tpublic String getEncoding() {\n\t\treturn encoding;\n\t}\n\n\tpublic void setEncoding(String encoding) {\n\t\tthis.encoding = encoding;\n\t}\n\n\tpublic byte[] getBytes() {\n\t\treturn content;\n\t}\n\n\tpublic void setBytes(byte[] content) {\n\t\tthis.content = content;\n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/RtspHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class RtspHeader {\n\t\n\tprivate String name;\n\tprivate String value;\n\n\t/**\n\t * Constructs a new header.\n\t * \n\t * @param header\n\t *          if the character ':' (colon) is not found, it will be the name of\n\t *          the header. 
Otherwise, this constructor parses the header line.\n\t */\n\tpublic RtspHeader(String header) {\n\t\t\n\t\tint colon = header.indexOf(':');\n\t\tif(colon == -1)\n\t\t\tname = header;\n\t\t\n\t\telse {\n\t\t\tname = header.substring(0, colon);\n\t\t\tvalue = header.substring(++colon).trim();\n\t\t}\n\t}\n\t\n\tpublic RtspHeader(String name, String value) {\n\t\tthis.name = name;\n\t\tthis.value = value;\n\t}\n\n\tpublic String getName() {\n\t\treturn name;\n\t}\n\n\tpublic String getRawValue() {\n\t\treturn value;\n\t}\n\n\tpublic void setRawValue(String value) {\n\t\tthis.value = value;\n\t}\n\n\tpublic String toString() {\n\t\treturn name + \": \" + value;\n\t}\n\n\tpublic boolean equals(Object obj) {\n\t\t\n\t\tif(super.equals(obj))\n\t\t\treturn true;\n\t\t\n\t\tif(obj instanceof String)\n\t\t\treturn getName().equals(obj);\n\t\t\n\t\tif(obj instanceof RtspHeader)\n\t\t\treturn getName().equals(((RtspHeader) obj).getName());\n\t\t\n\t\treturn false;\n\t\n\t}\n\n\tprotected final void checkName(String expected) throws Exception {\n\n\t\tif(expected.equalsIgnoreCase(getName()) == false)\n\t\t\tthrow new Exception(\"[Header Mismatch] - Expected: \" + expected + \" Retrieved: \" + getName());\n\n\t}\n\n\tprotected final void setName(String name) {\n\t\tvalue = this.name;\n\t\tthis.name = name;\n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/SessionHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\npublic class SessionHeader extends RtspBaseStringHeader {\n\t\n\tpublic static final String NAME = \"Session\";\n\n\tpublic SessionHeader() {\n\t\tsuper(NAME);\n\t}\n\t\n\tpublic SessionHeader(String header) {\n\t\tsuper(NAME, header);\n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/header/TransportHeader.java",
    "content": "package de.kp.net.rtsp.client.header;\n/*\n   Copyright 2010 Voice Technology Ind. e Com. Ltda.\n \n   This file is part of RTSPClientLib.\n\n    RTSPClientLib is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Lesser General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    RTSPClientLib is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Lesser General Public License for more details.\n\n    You should have received a copy of the GNU Lesser General Public License\n    along with RTSPClientLib.  If not, see <http://www.gnu.org/licenses/>.\n\n*/\n\nimport java.util.Arrays;\nimport java.util.List;\n\n\n/**\n * Models a \"Transport\" header from RFC 2326. According to specification, there may be parameters, which will be inserted as a list of strings, which follow below:\n * <code>\n   parameter           =    ( \"unicast\" | \"multicast\" )\n                       |    \";\" \"destination\" [ \"=\" address ]\n                       |    \";\" \"interleaved\" \"=\" channel [ \"-\" channel ]\n                       |    \";\" \"append\"\n                       |    \";\" \"ttl\" \"=\" ttl\n                       |    \";\" \"layers\" \"=\" 1*DIGIT\n                       |    \";\" \"port\" \"=\" port [ \"-\" port ]\n                       |    \";\" \"client_port\" \"=\" port [ \"-\" port ]\n                       |    \";\" \"server_port\" \"=\" port [ \"-\" port ]\n                       |    \";\" \"ssrc\" \"=\" ssrc\n                       |    \";\" \"mode\" = <\"> 1\\#mode <\">\n   ttl                 =    1*3(DIGIT)\n   port                =    1*5(DIGIT)\n   ssrc                =    8*8(HEX)\n   channel             =    1*3(DIGIT)\n   address             =    host\n 
  mode                =    <\"> *Method <\"> | Method\n   </code>\n * @author paulo\n *\n */\npublic class TransportHeader extends RtspHeader {\n\t\n\tpublic static final String NAME = \"Transport\";\n\n\tpublic static enum LowerTransport {\n\t\tTCP, UDP, DEFAULT\n\t};\n\n\tprivate LowerTransport transport;\n\n\tprivate List<String> parameters;\n\n\tpublic TransportHeader(String header)\n\t{\n\t\tsuper(header);\n\t\tString value = getRawValue();\n\t\tif(!value.startsWith(\"RTP/AVP\"))\n\t\t\tthrow new IllegalArgumentException(\"Missing RTP/AVP\");\n\t\tint index = 7;\n\t\tif(value.charAt(index) == '/')\n\t\t{\n\t\t\tswitch(value.charAt(++index))\n\t\t\t{\n\t\t\tcase 'T':\n\t\t\t\ttransport = LowerTransport.TCP;\n\t\t\t\tbreak;\n\t\t\tcase 'U':\n\t\t\t\ttransport = LowerTransport.UDP;\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\tthrow new IllegalArgumentException(\"Invalid Transport: \"\n\t\t\t\t\t\t+ value.substring(7));\n\t\t\t}\n\t\t\tindex += 3;\n\t\t} else\n\t\t\ttransport = LowerTransport.DEFAULT;\n\t\tif(value.charAt(index) != ';' && index != value.length())\n\t\t\tthrow new IllegalArgumentException(\"Parameter block expected\");\n\t\taddParameters(value.substring(++index).split(\";\"));\n\t}\n\n\tpublic TransportHeader(LowerTransport transport, String... 
parameters)\n\t{\n\t\tsuper(NAME);\n\t\tthis.transport = transport;\n\t\taddParameters(parameters);\n\t}\n\n\tpublic String getParameter(String part)\n\t{\n\t\tfor(String parameter : parameters)\n\t\t\tif(parameter.startsWith(part))\n\t\t\t\treturn parameter;\n\t\tthrow new IllegalArgumentException(\"No such parameter named \" + part);\n\t}\n\n\tvoid addParameters(String[] parameterList)\n\t{\n\t\tif(parameters == null)\n\t\t\tparameters = Arrays.asList(parameterList);\n\t\telse\n\t\t\tparameters.addAll(Arrays.asList(parameterList));\n\t}\n\n\tLowerTransport getTransport()\n\t{\n\t\treturn transport;\n\t}\n\n\t@Override\n\tpublic String toString()\n\t{\n\t\tStringBuilder buffer = new StringBuilder(NAME).append(\": \").append(\"RTP/AVP\");\n\t\tif(transport != LowerTransport.DEFAULT)\n\t\t\tbuffer.append('/').append(transport);\n\t\tfor(String parameter : parameters)\n\t\t\tbuffer.append(';').append(parameter);\n\t\treturn buffer.toString();\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/MessageBuffer.java",
    "content": "package de.kp.net.rtsp.client.message;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.api.Message;\n\npublic class MessageBuffer {\n\t/**\n\t * buffer for received data\n\t */\n\tprivate byte[] data;\n\n\t/**\n\t * offset for starting useful area\n\t */\n\tprivate int offset;\n\n\t/**\n\t * length of useful portion.\n\t */\n\tprivate int length;\n\n\t/**\n\t * Used (read) buffer.\n\t */\n\tprivate int used;\n\n\t/**\n\t * {@link Message} created during last parsing.\n\t */\n\tprivate Message message;\n\n\t/**\n\t * Adds more data to buffer and ensures the sequence [data, newData] is\n\t * contiguous.\n\t * \n\t * @param newData data to be added to the buffer.\n\t */\n\tpublic void addData(byte[] newData, int newLength) {\n\t\t\n\t\tif (data == null) {\n\t\t\t\n\t\t\tdata = newData;\n\t\t\tlength = newLength;\n\t\t\toffset = 0;\n\t\t\n\t\t} else {\n\t\t\t\n\t\t\t// buffer seems to be small.\n\t\t\tif((data.length - offset - length) < newLength) {\n\t\t\t\t// try to squeeze data at the beginning of the buffer only if current\n\t\t\t\t// buffer does not 
overlap\n\t\t\t\tif(offset >= length && (data.length - length) >= newLength) {\n\t\t\t\t\tSystem.arraycopy(data, offset, data, 0, length);\n\t\t\t\t\toffset = 0;\n\t\t\t\t\n\t\t\t\t} else { // worst-case scenario, a new buffer will have to be created\n\t\t\t\t\tbyte[] temp = new byte[data.length + newLength];\n\t\t\t\t\tSystem.arraycopy(data, offset, temp, 0, length);\n\t\t\t\t\toffset = 0;\n\t\t\t\t\tdata = temp;\n\t\t\t\t}\n\t\t\t}\n\t\t\t// there's room for everything - just copy\n\t\t\tSystem.arraycopy(newData, 0, data, offset + length, newLength);\n\t\t\tlength += newLength;\n\t\t}\n\t}\n\n\t/**\n\t * Discards used portions of the buffer.\n\t */\n\tpublic void discardData() {\n\t\toffset += used;\n\t\tlength -= used;\n\t}\n\n\tpublic byte[] getData() {\n\t\treturn data;\n\t}\n\n\tpublic int getOffset() {\n\t\treturn offset;\n\t}\n\n\tpublic int getLength() {\n\t\treturn length;\n\t}\n\n\tpublic void setMessage(Message message) {\n\t\tthis.message = message;\n\t}\n\n\tpublic Message getMessage() {\n\t\treturn message;\n\t}\n\n\tpublic void setused(int used) {\n\t\tthis.used = used;\n\t}\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/RtspDescriptor.java",
    "content": "package de.kp.net.rtsp.client.message;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\nimport java.util.StringTokenizer;\r\n\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\npublic class RtspDescriptor {\r\n\r\n\tprivate static String SEP = \"\\r\\n\";\r\n\t\r\n\tprivate ArrayList<RtspMedia> mediaList;\r\n\t\r\n\tpublic RtspDescriptor(String descriptor) {\r\n\t\t\r\n\t\t// initialize media list\r\n\t\tmediaList = new ArrayList<RtspMedia>();\r\n\t\t\r\n\t\tRtspMedia mediaItem = null;\r\n\t\t\r\n\t\ttry {\r\n\t    \tStringTokenizer tokenizer = new StringTokenizer(descriptor, SEP);\r\n\t    \twhile (tokenizer.hasMoreTokens()) {\r\n\t    \t\t\r\n\t    \t\tString token = tokenizer.nextToken();\r\n\t    \t\tif (token.startsWith(\"m=\")) {\r\n\t    \t\t\t// a new media item is detected\r\n\t    \t\t\tmediaItem = new RtspMedia(token); \r\n    \t\t\t\tmediaList.add(mediaItem);\r\n\t    \t\t\t\r\n\t    \t\t\t\r\n\t    \t\t} else if (token.startsWith(\"a=\")) {\r\n\t    \t\t\tmediaItem.setAttribute(token);\t    \t\t\t\r\n\t    \t\t}\r\n\t\r\n\t    \t}\r\n\t    \t\r\n\t\t} catch (Exception e) {\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\t\t\r\n\t}\r\n\r\n\tpublic List<RtspMedia> getMediaList() {\r\n\t\treturn mediaList;\r\n\t}\r\n\t\r\n\tpublic RtspMedia getFirstVideo() {\r\n\t\t\r\n\t\tRtspMedia video = null;\r\n\t\tfor (RtspMedia mediaItem:this.mediaList) {\r\n\t\t\t\r\n\t\t\tif (mediaItem.getMediaType().equals(RtspConstants.SDP_VIDEO_TYPE)) {\r\n\t\t\t\tvideo = mediaItem;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\treturn video;\r\n\t\t\r\n\t}\r\n\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/RtspEntityMessage.java",
    "content": "package de.kp.net.rtsp.client.message;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.api.EntityMessage;\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.header.RtspContent;\nimport de.kp.net.rtsp.client.header.ContentEncodingHeader;\nimport de.kp.net.rtsp.client.header.ContentLengthHeader;\nimport de.kp.net.rtsp.client.header.ContentTypeHeader;\n\npublic class RtspEntityMessage implements EntityMessage {\n\t\n\tprivate RtspContent content;\n\n\tprivate final Message message;\n\n\tpublic RtspEntityMessage(Message message) {\n\t\tthis.message = message;\n\t}\n\t\n\tpublic RtspEntityMessage(Message message, RtspContent body) {\n\t\tthis(message);\n\t\tsetContent(body);\n\t}\n\t\n\t@Override\n\tpublic Message getMessage() {\n\t\treturn message;\n\t};\n\n\tpublic byte[] getBytes() throws Exception {\n\t\n\t\tmessage.getHeader(ContentTypeHeader.NAME);\n\t\tmessage.getHeader(ContentLengthHeader.NAME);\n\t\t\n\t\treturn content.getBytes();\n\t\n\t}\n\n\t@Override\n\tpublic RtspContent getContent() {\n\t\treturn content;\n\t}\n\n\t@Override\n\tpublic void 
setContent(RtspContent content) {\n\t\t\n\t\tif(content == null) throw new NullPointerException();\n\t\tthis.content = content;\n\t\t\n\t\tmessage.addHeader(new ContentTypeHeader(content.getType()));\n\t\tif(content.getEncoding() != null)\n\t\t\tmessage.addHeader(new ContentEncodingHeader(content.getEncoding()));\n\t\t\n\t\tmessage.addHeader(new ContentLengthHeader(content.getBytes().length));\n\t\n\t}\n\t\n\t@Override\n\tpublic boolean isEntity() {\n\t\treturn content != null;\n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/RtspMedia.java",
    "content": "package de.kp.net.rtsp.client.message;\r\n\r\npublic class RtspMedia {\r\n\r\n\tprivate String mediaType;\r\n\tprivate String mediaFormat;\r\n\t\r\n\tprivate String transportPort;\r\n\tprivate String transportProtocol;\r\n\t\r\n\tprivate String encoding;\r\n\tprivate String clockrate;\r\n\t\r\n\tprivate String framerate;\r\n\t\r\n\tprivate static String SDP_CONTROL   = \"a=control:\";\r\n\tprivate static String SDP_RANGE     = \"a=range:\";\r\n\tprivate static String SDP_LENGTH    = \"a=length:\";\r\n\tprivate static String SDP_RTMAP     = \"a=rtpmap:\";\r\n\tprivate static String SDP_FRAMERATE = \"a=framerate:\";\r\n\t\r\n\tpublic RtspMedia(String line) {\r\n\t\t\r\n\t\tString[] tokens = line.substring(2).split(\" \"); \r\n\t\t\r\n\t\tmediaType   = tokens[0];\t\t\r\n\t\tmediaFormat = tokens[3];\r\n\t\r\n\t\ttransportPort     = tokens[1];\r\n\t\ttransportProtocol = tokens[2];\r\n\t\t\r\n\t}\r\n\t\r\n\tpublic String getMediaType() {\r\n\t\treturn mediaType;\r\n\t}\r\n\t\r\n\tpublic String getFrameRate() {\r\n\t\treturn framerate;\r\n\t}\r\n\t\r\n\tpublic String getEncoding() {\r\n\t\treturn encoding;\r\n\t}\r\n\t\r\n\tpublic String getClockrate() {\r\n\t\treturn clockrate;\r\n\t}\r\n\t\r\n\tpublic String getTransportPort() {\r\n\t\treturn transportPort;\r\n\t}\r\n\t\r\n\tpublic void setAttribute(String line) throws Exception {\r\n\t\t\r\n\t\tif (line.startsWith(SDP_CONTROL)) {\r\n\t\t\t\r\n\t\t} else if (line.startsWith(SDP_RANGE)) {\r\n\r\n\t\t} else if (line.startsWith(SDP_LENGTH)) {\r\n\r\n\t\t} else if (line.startsWith(SDP_FRAMERATE)) {\r\n\r\n\t\t\tframerate = line.substring(SDP_FRAMERATE.length());\r\n\t\t\t\r\n\t\t} else if (line.startsWith(SDP_RTMAP)) {\r\n\t\t\t\r\n\t\t\tString[] tokens = line.substring(SDP_RTMAP.length()).split(\" \");\r\n\t\t\t\r\n\t\t\tString payloadType = tokens[0];\r\n\t\t\tif (payloadType.equals(mediaFormat) == false) throw new Exception(\"Corrupted Session Description - Payload Type\");\r\n\t\t\t\r\n\t\t\tif 
(tokens[1].contains(\"/\")) {\r\n\r\n\t\t\t\tString[] subtokens = tokens[1].split(\"/\");\r\n\t\t\t\t\r\n\t\t\t\tencoding  = subtokens[0];\r\n\t\t\t\tclockrate = subtokens[1];\r\n\t\t\t\t\r\n\t\t\t} else {\r\n\t\t\t\tencoding = tokens[1];\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\r\n\t}\r\n\t\r\n\tpublic String toString() {\r\n\t\treturn mediaType + \" \" + transportPort + \" \" + transportProtocol + \" \" + mediaFormat + \" \" + \r\n\t\t\tencoding + \"/\" + clockrate;\r\n\t}\r\n\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/RtspMessage.java",
    "content": "package de.kp.net.rtsp.client.message;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.util.ArrayList;\nimport java.util.List;\n\nimport de.kp.net.rtsp.client.api.EntityMessage;\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.header.CSeqHeader;\nimport de.kp.net.rtsp.client.header.RtspHeader;\n\npublic abstract class RtspMessage implements Message {\n\n\tprivate String line;\n\n\tprivate List<RtspHeader> headers;\n\n\tprivate CSeqHeader cseq;\n\t\n\tprivate EntityMessage entity;\n\n\tpublic RtspMessage() {\n\t\theaders = new ArrayList<RtspHeader>();\n\t}\n\n\t@Override\n\tpublic byte[] getBytes() throws Exception {\n\t\t\n\t\tgetHeader(CSeqHeader.NAME);\n\t\taddHeader(new RtspHeader(\"User-Agent\", \"RtspClient\"));\n\t\t\n\t\tbyte[] message = toString().getBytes();\n\t\tif (getEntityMessage() != null) {\n\t\t\t\n\t\t\tbyte[] body = entity.getBytes();\n\t\t\tbyte[] full = new byte[message.length + body.length];\n\t\t\t\n\t\t\tSystem.arraycopy(message, 0, full, 0, message.length);\n\t\t\tSystem.arraycopy(body, 0, full, message.length, 
body.length);\n\t\t\t\n\t\t\tmessage = full;\n\t\t}\n\t\t\n\t\treturn message;\n\t\n\t}\n\n\t@Override\n\tpublic RtspHeader getHeader(final String name) throws Exception {\n\t\t\n\t\tint index = headers.indexOf(new Object() {\n\t\t\t@Override\n\t\t\tpublic boolean equals(Object obj) {\n\t\t\t\treturn name.equalsIgnoreCase(((RtspHeader) obj).getName());\n\t\t\t}\n\t\t});\n\t\t\n\t\t\n\t\tif(index == -1) throw new Exception(\"[Missing Header] \" + name);\n\t\t\n\t\treturn headers.get(index);\n\t\n\t}\n\n\t@Override\n\tpublic RtspHeader[] getHeaders() {\n\t\treturn headers.toArray(new RtspHeader[headers.size()]);\n\t}\n\n\t@Override\n\tpublic CSeqHeader getCSeq() {\n\t\treturn cseq;\n\t}\n\n\t@Override\n\tpublic String getLine() {\n\t\treturn line;\n\t}\n\n\tpublic void setLine(String line) {\n\t\tthis.line = line;\n\t}\n\t\n\t@Override\n\tpublic void addHeader(RtspHeader header) {\n\t\t\n\t\tif(header == null) return;\n\t\tif(header instanceof CSeqHeader)\n\t\t\tcseq = (CSeqHeader) header;\n\t\t\n\t\tint index = headers.indexOf(header);\n\t\tif(index > -1)\n\t\t\theaders.remove(index);\n\t\telse\n\t\t\tindex = headers.size();\n\t\t\n\t\theaders.add(index, header);\n\t\n\t}\n\t\n\t@Override\n\tpublic EntityMessage getEntityMessage() {\n\t\treturn entity;\n\t}\n\t\n\t@Override\n\tpublic Message setEntityMessage(EntityMessage entity) {\n\t\tthis.entity = entity;\n\t\treturn this;\n\t}\n\t\n\t@Override\n\tpublic String toString() {\n\t\t\n\t\tStringBuilder buffer = new StringBuilder();\n\t\tbuffer.append(getLine()).append(\"\\r\\n\");\n\t\t\n\t\tfor(RtspHeader header : headers)\n\t\t\tbuffer.append(header).append(\"\\r\\n\");\n\t\t\n\t\tbuffer.append(\"\\r\\n\");\n\t\treturn buffer.toString();\n\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/message/RtspMessageFactory.java",
    "content": "package de.kp.net.rtsp.client.message;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.io.ByteArrayInputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.lang.reflect.Constructor;\nimport java.net.URISyntaxException;\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.api.MessageFactory;\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.header.CSeqHeader;\nimport de.kp.net.rtsp.client.header.RtspContent;\nimport de.kp.net.rtsp.client.header.ContentEncodingHeader;\nimport de.kp.net.rtsp.client.header.ContentLengthHeader;\nimport de.kp.net.rtsp.client.header.ContentTypeHeader;\nimport de.kp.net.rtsp.client.header.RtspHeader;\nimport de.kp.net.rtsp.client.header.SessionHeader;\nimport de.kp.net.rtsp.client.header.TransportHeader;\nimport de.kp.net.rtsp.client.request.RtspDescribeRequest;\nimport de.kp.net.rtsp.client.request.RtspOptionsRequest;\nimport de.kp.net.rtsp.client.request.RtspPauseRequest;\nimport de.kp.net.rtsp.client.request.RtspPlayRequest;\nimport 
de.kp.net.rtsp.client.request.RtspRequest;\nimport de.kp.net.rtsp.client.request.RtspSetupRequest;\nimport de.kp.net.rtsp.client.request.RtspTeardownRequest;\nimport de.kp.net.rtsp.client.response.RtspResponse;\n\npublic class RtspMessageFactory implements MessageFactory {\n\t\n\tprivate static Map<String, Constructor<? extends RtspHeader>> headerMap;\n\tprivate static Map<RtspRequest.Method, Class<? extends RtspRequest>> requestMap;\n\n\tstatic {\n\t\t\n\t\theaderMap  = new HashMap<String, Constructor<? extends RtspHeader>>();\n\t\trequestMap = new HashMap<RtspRequest.Method, Class<? extends RtspRequest>>();\n\n\t\ttry {\n\t\n\t\t\tputHeader(ContentEncodingHeader.class);\n\t\t\tputHeader(ContentLengthHeader.class);\n\t\t\tputHeader(ContentTypeHeader.class);\n\t\t\tputHeader(CSeqHeader.class);\n\t\t\tputHeader(SessionHeader.class);\n\t\t\tputHeader(TransportHeader.class);\n\n\t\t\trequestMap.put(RtspRequest.Method.OPTIONS, \tRtspOptionsRequest.class);\n\t\t\trequestMap.put(RtspRequest.Method.SETUP, \tRtspSetupRequest.class);\n\t\t\trequestMap.put(RtspRequest.Method.TEARDOWN, RtspTeardownRequest.class);\n\t\t\trequestMap.put(RtspRequest.Method.DESCRIBE, RtspDescribeRequest.class);\n\t\t\trequestMap.put(RtspRequest.Method.PLAY, \tRtspPlayRequest.class);\n\t\t\trequestMap.put(RtspRequest.Method.PAUSE, \tRtspPauseRequest.class);\n\n\t\t} catch (Exception e) {\n\t\t\te.printStackTrace();\n\t\t}\n\t}\n\n\tprivate static void putHeader(Class<? 
extends RtspHeader> cls) throws Exception {\n\t\theaderMap.put(cls.getDeclaredField(\"NAME\").get(null).toString().toLowerCase(), cls.getConstructor(String.class));\n\t}\n\n\n\t/**\n\t * This method handles RTSP server responses\n\t */\n\tpublic void incomingMessage(MessageBuffer buffer) throws Exception {\n\n\t\tByteArrayInputStream in = new ByteArrayInputStream(buffer.getData(), buffer.getOffset(), buffer.getLength());\n\t\t\n\t\tint initial = in.available();\n\t\tMessage message = null;\n\n\t\ttry {\n\t\t\t// message line.\n\t\t\tString line = readLine(in);\n\t\t\tif (line.startsWith(Message.RTSP_TOKEN)) {\n\t\t\t\tmessage = new RtspResponse(line);\n\t\t\t\n\t\t\t} else {\n\t\t\t\t\n\t\t\t\tRtspRequest.Method method = null;\n\t\t\t\ttry {\n\t\t\t\t\tmethod = RtspRequest.Method.valueOf(line.substring(0, line.indexOf(' ')));\n\t\t\t\t\n\t\t\t\t} catch (IllegalArgumentException ilae) {\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tClass<? extends RtspRequest> cls = requestMap.get(method);\n\t\t\t\tif (cls != null)\n\t\t\t\t\tmessage = cls.getConstructor(String.class).newInstance(line);\n\t\t\t\t\n\t\t\t\telse\n\t\t\t\t\tmessage = new RtspRequest(line);\n\t\t\t\n\t\t\t}\n\n\t\t\twhile (true)\n\t\t\t{\n\t\t\t\tline = readLine(in);\n\t\t\t\tif (in == null)\n\t\t\t\t\tthrow new Exception();\n\t\t\t\tif (line.length() == 0)\n\t\t\t\t\tbreak;\n\t\t\t\tConstructor<? 
extends RtspHeader> c = headerMap.get(line.substring(0,\n\t\t\t\t\t\tline.indexOf(':')).toLowerCase());\n\t\t\t\tif (c != null)\n\t\t\t\t\tmessage.addHeader(c.newInstance(line));\n\t\t\t\telse\n\t\t\t\t\tmessage.addHeader(new RtspHeader(line));\n\t\t\t}\n\t\t\tbuffer.setMessage(message);\n\n\t\t\ttry\n\t\t\t{\n\t\t\t\tint length = ((ContentLengthHeader) message\n\t\t\t\t\t\t.getHeader(ContentLengthHeader.NAME)).getValue();\n\t\t\t\tif (in.available() < length)\n\t\t\t\t\tthrow new Exception();\n\t\t\t\tRtspContent content = new RtspContent();\n\t\t\t\tcontent.setDescription(message);\n\t\t\t\tbyte[] data = new byte[length];\n\t\t\t\tin.read(data);\n\t\t\t\tcontent.setBytes(data);\n\t\t\t\tmessage.setEntityMessage(new RtspEntityMessage(message, content));\n\t\t\t} catch (Exception e)\n\t\t\t{\n\t\t\t}\n\n\t\t} catch (Exception e)\n\t\t{\n\t\t\tthrow new Exception(e);\n\t\t} finally\n\t\t{\n\t\t\tbuffer.setused(initial - in.available());\n\t\t\ttry\n\t\t\t{\n\t\t\t\tin.close();\n\t\t\t} catch (IOException e)\n\t\t\t{\n\t\t\t}\n\t\t}\n\t}\n\n\t@Override\n\tpublic RtspRequest outgoingRequest(String uri, RtspRequest.Method method, int cseq, RtspHeader... extras) throws URISyntaxException {\n\t\t\n\t\tClass<? extends RtspRequest> cls = requestMap.get(method);\n\t\tRtspRequest message;\n\t\t\n\t\ttry {\n\t\t\tmessage = cls != null ? cls.newInstance() : new RtspRequest();\n\t\t\n\t\t} catch (Exception e) {\n\t\t\tthrow new RuntimeException(e);\n\t\t}\n\t\t\n\t\tmessage.setLine(method, uri);\n\t\tfillMessage(message, cseq, extras);\n\n\t\treturn message;\n\t}\n\n\t@Override\n\tpublic RtspRequest outgoingRequest(RtspContent body, String uri, RtspRequest.Method method, int cseq, RtspHeader... 
extras) throws URISyntaxException {\n\t\t\n\t\tMessage message = outgoingRequest(uri, method, cseq, extras);\n\t\treturn (RtspRequest) message.setEntityMessage(new RtspEntityMessage(message, body));\n\t\t\n\t}\n\n\t@Override\n\tpublic Response outgoingResponse(int code, String text, int cseq, RtspHeader... extras) {\n\n\t\tRtspResponse message = new RtspResponse();\n\t\tmessage.setLine(code, text);\n\t\t\n\t\tfillMessage(message, cseq, extras);\n\t\treturn message;\n\t\t\n\t}\n\n\t@Override\n\tpublic Response outgoingResponse(RtspContent body, int code, String text, int cseq, RtspHeader... extras) {\n\t\t\n\t\tMessage message = outgoingResponse(code, text, cseq, extras);\n\t\treturn (Response) message.setEntityMessage(new RtspEntityMessage(message, body));\n\t\n\t}\n\n\tprivate void fillMessage(Message message, int cseq, RtspHeader[] extras) {\n\t\t\n\t\tmessage.addHeader(new CSeqHeader(cseq));\n\t\tfor (RtspHeader h : extras)\n\t\t\tmessage.addHeader(h);\n\t\n\t}\n\n\tprivate String readLine(InputStream in) throws IOException {\n\n\t\tint ch = 0;\n\t\t\n\t\tStringBuilder b = new StringBuilder();\n\t\tfor (ch = in.read(); ch != -1 && ch != 0x0d && ch != 0x0a; ch = in.read())\n\t\t\tb.append((char) ch);\n\t\t\n\t\tif (ch == -1)\n\t\t\treturn null;\n\t\t\n\t\tin.read();\n\t\treturn b.toString();\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspDescribeRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.RtspClient;\nimport de.kp.net.rtsp.client.api.Response;\n\npublic class RtspDescribeRequest extends RtspRequest {\n\n\tpublic RtspDescribeRequest() {\n\t\tsuper();\n\t}\n\n\tpublic RtspDescribeRequest(String messageLine) throws URISyntaxException {\n\t\tsuper(messageLine);\n\t}\n\n\t@Override\n\tpublic byte[] getBytes() throws Exception {\n\t\tgetHeader(\"Accept\");\n\t\treturn super.getBytes();\n\t}\n\n\t@Override\n\tpublic void handleResponse(RtspClient client, Response response) {\n\t\tsuper.handleResponse(client, response);\n\t\t\n\t\ttry {\n\t\t\tclient.getRequestListener().onDescriptor(client, new String(response.getEntityMessage().getContent().getBytes()));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tclient.getRequestListener().onError(client, e);\n\t\t}\n\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspOptionsRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URI;\nimport java.net.URISyntaxException;\n\npublic class RtspOptionsRequest extends RtspRequest {\n\t\n\tpublic RtspOptionsRequest() {\n\t}\n\t\n\tpublic RtspOptionsRequest(String line) throws URISyntaxException {\n\t\tsuper(line);\n\t}\n\t\n\t@Override\n\tpublic void setLine(Method method, String uri) throws URISyntaxException {\n\t\t\n\t\tsetMethod(method);\n\t\tsetURI(\"*\".equals(uri) ? uri : new URI(uri).toString());\n\t\t\n\t\tsuper.setLine(method.toString() + ' ' + uri + ' ' + RTSP_VERSION_TOKEN);\n\t}\n\t\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspPauseRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\r\n\r\nimport java.net.URISyntaxException;\r\n\r\nimport de.kp.net.rtsp.client.header.SessionHeader;\r\n\r\npublic class RtspPauseRequest extends RtspRequest {\r\n\r\n\tpublic RtspPauseRequest() {\r\n\t}\r\n\t\r\n\tpublic RtspPauseRequest(String messageLine) throws URISyntaxException {\r\n\t\tsuper(messageLine);\r\n\t}\r\n\r\n\t@Override\r\n\tpublic byte[] getBytes() throws Exception {\r\n\t\tgetHeader(SessionHeader.NAME);\r\n\t\treturn super.getBytes();\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspPlayRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.header.SessionHeader;\n\npublic class RtspPlayRequest extends RtspRequest {\n\n\tpublic RtspPlayRequest() {\n\t}\n\t\n\tpublic RtspPlayRequest(String messageLine) throws URISyntaxException {\n\t\tsuper(messageLine);\n\t}\n\n\t@Override\n\tpublic byte[] getBytes() throws Exception {\n\t\tgetHeader(SessionHeader.NAME);\n\t\treturn super.getBytes();\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URI;\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.RtspClient;\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.api.Request;\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.message.RtspMessage;\n\npublic class RtspRequest extends RtspMessage implements Request {\n\t\n\tprivate Method method;\n\n\tprivate String uri;\n\n\tpublic RtspRequest() {\n\t}\n\n\tpublic RtspRequest(String messageLine) throws URISyntaxException {\n\t\tString[] parts = messageLine.split(\" \");\n\t\tsetLine(Method.valueOf(parts[0]), parts[1]);\n\t}\n\n\t@Override\n\tpublic void setLine(Method method, String uri) throws URISyntaxException {\n\t\t\n\t\tthis.method = method;\n\t\tthis.uri = new URI(uri).toString();\n\t\t;\n\n\t\tsuper.setLine(method.toString() + ' ' + uri + ' ' + RTSP_VERSION_TOKEN);\n\t\n\t}\n\n\t@Override\n\tpublic Method getMethod() {\n\t\treturn method;\n\t}\n\n\t@Override\n\tpublic String getURI() {\n\t\treturn 
uri;\n\t}\n\n\t@Override\n\tpublic void handleResponse(RtspClient client, Response response) {\n\t\t\n\t\tif (testForClose(client, this) || testForClose(client, response))\n\t\t\tclient.getTransport().disconnect();\n\t\n\t}\n\n\tprotected void setURI(String uri) {\n\t\tthis.uri = uri;\n\t}\n \n\tprotected void setMethod(Method method) {\n\t\tthis.method = method;\n\t}\n\n\tprivate boolean testForClose(RtspClient client, Message message) {\n\t\t\n\t\ttry {\n\t\t\treturn message.getHeader(\"Connection\").getRawValue().equalsIgnoreCase(\"close\");\n\t\t\n\t\t} catch(Exception e) {\n\t\t\t\n\t\t\t// this is an expected exception in case of no\n\t\t\t// connection close in the response message\n\t\t\t\n\t\t\t// client.getRequestListener().onError(client, e);\n\t\t}\n\n\t\treturn false;\n\t\n\t}\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspSetupRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.RtspClient;\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.header.SessionHeader;\n\npublic class RtspSetupRequest extends RtspRequest {\n\tpublic RtspSetupRequest() {\n\t}\n\n\tpublic RtspSetupRequest(String line) throws URISyntaxException {\n\t\tsuper(line);\n\t}\n\n\t@Override\n\tpublic byte[] getBytes() throws Exception {\n\t\tgetHeader(\"Transport\");\n\t\treturn super.getBytes();\n\t}\n\n\t@Override\n\tpublic void handleResponse(RtspClient client, Response response) {\n\t\t\n\t\tsuper.handleResponse(client, response);\n\t\ttry {\n\t\t\tif(response.getStatusCode() == 200)\n\t\t\t\tclient.setSession((SessionHeader) response.getHeader(SessionHeader.NAME));\n\t\t\n\t\t} catch(Exception e) {\n\t\t\tclient.getRequestListener().onError(client, e);\n\t\t}\n\t\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/request/RtspTeardownRequest.java",
    "content": "package de.kp.net.rtsp.client.request;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.net.URISyntaxException;\n\nimport de.kp.net.rtsp.client.RtspClient;\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.header.SessionHeader;\n\npublic class RtspTeardownRequest extends RtspRequest {\n\n\tpublic RtspTeardownRequest() {\n\t\tsuper();\n\t}\n\n\tpublic RtspTeardownRequest(String messageLine) throws URISyntaxException {\n\t\tsuper(messageLine);\n\t}\n\n\t@Override\n\tpublic byte[] getBytes() throws Exception {\n\t\tgetHeader(SessionHeader.NAME);\n\t\treturn super.getBytes();\n\t}\n\t\n\t@Override\n\tpublic void handleResponse(RtspClient client, Response response) {\n\t\tsuper.handleResponse(client, response);\n\t\t\n\t\tif(response.getStatusCode() == 200) client.setSession(null);\n\t\tclient.getTransport().disconnect();\n\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/response/RtspResponse.java",
    "content": "package de.kp.net.rtsp.client.response;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.api.Response;\nimport de.kp.net.rtsp.client.message.RtspMessage;\n\npublic class RtspResponse extends RtspMessage implements Response {\n\t\n\tprivate int status;\n\tprivate String text;\n\n\tpublic RtspResponse() {\n\t}\n\n\tpublic RtspResponse(String line) {\n\t\t\n\t\tsetLine(line);\n\t\tline = line.substring(line.indexOf(' ') + 1);\n\t\t\n\t\tstatus = Integer.parseInt(line.substring(0, line.indexOf(' ')));\n\t\ttext = line.substring(line.indexOf(' ') + 1);\n\t\n\t}\n\n\t@Override\n\tpublic int getStatusCode() {\n\t\treturn status;\n\t}\n\t\n\t@Override\n\tpublic String getStatusText() {\n\t\treturn text;\n\t}\n\n\t@Override\n\tpublic void setLine(int statusCode, String statusText) {\n\t\n\t\tstatus = statusCode;\n\t\ttext   = statusText;\n\t\t\n\t\tsuper.setLine(RTSP_VERSION_TOKEN + ' ' + status + ' ' + text);\n\t\n\t}\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/transport/TCPTransport.java",
    "content": "package de.kp.net.rtsp.client.transport;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport java.io.IOException;\nimport java.net.Socket;\nimport java.net.URI;\n\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.api.Transport;\nimport de.kp.net.rtsp.client.api.TransportListener;\n\nclass TCPTransportThread extends Thread {\n\t\n\tprivate final TCPTransport transport;\n\n\tprivate volatile TCPTransportListener listener;\n\n\tpublic TCPTransportThread(TCPTransport transport, TransportListener listener) {\n\t\tthis.transport = transport;\n\t\tthis.listener  = new TCPTransportListener(listener);\n\t}\n\n\tpublic TCPTransportListener getListener() {\n\t\treturn listener;\n\t}\n\n\tpublic void setListener(TransportListener listener) {\n\t\tthis.listener = new TCPTransportListener(listener);\n\t}\n\n\t@Override\n\tpublic void run() {\n\t\t\n\t\tlistener.connected(transport);\n\t\t\n\t\tbyte[] buffer = new byte[2048];\n\t\t\n\t\tint read = -1;\n\t\twhile(transport.isConnected()) {\n\t\t\t\n\t\t\ttry {\n\t\t\t\tread = transport.receive(buffer);\n\t\t\t\tif(read == 
-1)\n\t\t\t\t{\n\t\t\t\t\ttransport.setConnected(false);\n\t\t\t\t\tlistener.remoteDisconnection(transport);\n\t\t\t\t} else\n\t\t\t\t\tlistener.dataReceived(transport, buffer, read);\n\t\t\t\n\t\t\t} catch(IOException e) {\n\t\t\t\tlistener.error(transport, e);\n\t\t\t}\n\t\t}\n\t}\n}\n\npublic class TCPTransport implements Transport {\n\t\n\tprivate Socket socket;\n\n\tprivate TCPTransportThread thread;\n\tprivate TransportListener transportListener;\n\n\tprivate volatile boolean connected;\n\n\tpublic TCPTransport() {\n\t}\n\n\t@Override\n\tpublic void connect(URI to) throws IOException {\n\t\t\n\t\tif(connected)\n\t\t\tthrow new IllegalStateException(\"Socket is still open. Close it first\");\n\t\t\n\t\tint port = to.getPort();\n\t\tif(port == -1) port = 554;\n\t\t\n\t\tsocket = new Socket(to.getHost(), port);\n\t\t\n\t\tsetConnected(true);\n\t\tthread = new TCPTransportThread(this, transportListener);\n\t\tthread.start();\n\t\n\t}\n\n\t@Override\n\tpublic void disconnect() {\n\t\t\n\t\tsetConnected(false);\n\t\ttry {\n\t\t\tsocket.close();\n\t\t\n\t\t} catch(IOException e) {\n\t\t}\n\t}\n\n\t@Override\n\tpublic boolean isConnected() {\n\t\treturn connected;\n\t}\n\n\t@Override\n\tpublic synchronized void sendMessage(Message message) throws Exception {\n\t\t\n\t\tsocket.getOutputStream().write(message.getBytes());\n\t\tthread.getListener().dataSent(this);\n\t\n\t}\n\n\t@Override\n\tpublic void setTransportListener(TransportListener listener) {\n\t\ttransportListener = listener;\n\t\tif(thread != null)\n\t\t\tthread.setListener(listener);\n\t}\n\n\t@Override\n\tpublic void setUserData(Object data) {\n\t}\n\n\tint receive(byte[] data) throws IOException {\n\t\treturn socket.getInputStream().read(data);\n\t}\n\n\tvoid setConnected(boolean connected) {\n\t\tthis.connected = connected;\n\t}\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/client/transport/TCPTransportListener.java",
    "content": "package de.kp.net.rtsp.client.transport;\n/**\n *\tCopyright 2010 Voice Technology Ind. e Com. Ltda.\n *\n *\tRTSPClientLib is free software: you can redistribute it and/or \n *\tmodify it under the terms of the GNU Lesser General Public License \n *\tas published by the Free Software Foundation, either version 3 of \n *\tthe License, or (at your option) any later version.\n *\n *\tRTSPClientLib is distributed in the hope that it will be useful,\n *\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n *\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n *\tGNU Lesser General Public License for more details. \n *\n *\tYou should have received a copy of the GNU Lesser General Public License\n *\talong with this software. If not, see <http://www.gnu.org/licenses/>.\n * \n *\n *\tThis class has been adapted to the needs of the RtspCamera project\n *\t@author Stefan Krusche (krusche@dr-kruscheundpartner.de)\n *\n */\n\nimport de.kp.net.rtsp.client.api.Message;\nimport de.kp.net.rtsp.client.api.Transport;\nimport de.kp.net.rtsp.client.api.TransportListener;\n\nclass TCPTransportListener implements TransportListener {\n\t\n\tprivate final TransportListener behaviour;\n\n\tpublic TCPTransportListener(TransportListener theBehaviour) {\n\t\tbehaviour = theBehaviour;\n\t}\n\n\t@Override\n\tpublic void connected(Transport t) {\n\t\tif (behaviour != null)\n\t\t\ttry {\n\t\t\t\tbehaviour.connected(t);\n\t\t\t\n\t\t\t} catch(Throwable error) {\n\t\t\t\tbehaviour.error(t, error);\n\t\t\t}\n\t}\n\n\t@Override\n\tpublic void dataReceived(Transport t, byte[] data, int size) {\n\t\t\n\t\tif (behaviour != null)\n\t\t\ttry {\n\t\t\t\tbehaviour.dataReceived(t, data, size);\n\t\t\t\n\t\t\t} catch(Throwable error) {\n\t\t\t\tbehaviour.error(t, error);\n\t\t\t}\n\t}\n\n\t@Override\n\tpublic void dataSent(Transport t) {\n\t\t// TODO Auto-generated method stub\n\t\tif (behaviour != null)\n\t\t\ttry {\n\t\t\t\tbehaviour.dataSent(t);\n\t\t\t\n\t\t\t} 
catch(Throwable error) {\n\t\t\t\tbehaviour.error(t, error);\n\t\t\t}\n\n\t}\n\n\t@Override\n\tpublic void error(Transport t, Throwable error) {\n\t\tif (behaviour != null)\n\t\t\tbehaviour.error(t, error);\n\t}\n\n\t@Override\n\tpublic void error(Transport t, Message message, Throwable error) {\n\t\tif(behaviour != null)\n\t\t\tbehaviour.error(t, message, error);\n\t}\n\n\t@Override\n\tpublic void remoteDisconnection(Transport t) {\n\t\tif (behaviour != null)\n\t\t\ttry {\n\t\t\t\tbehaviour.remoteDisconnection(t);\n\t\t\t\n\t\t\t} catch(Throwable error) {\n\t\t\t\tbehaviour.error(t, error);\n\t\t\t}\n\t\n\t}\n\n}\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/RtspServer.java",
    "content": "package de.kp.net.rtsp.server;\r\n\r\nimport java.io.BufferedReader;\r\nimport java.io.BufferedWriter;\r\nimport java.io.IOException;\r\nimport java.io.InputStreamReader;\r\nimport java.io.OutputStreamWriter;\r\nimport java.net.InetAddress;\r\nimport java.net.ServerSocket;\r\nimport java.net.Socket;\r\nimport java.util.Vector;\r\n\r\nimport android.util.Log;\r\n\r\nimport de.kp.net.rtp.RtpSender;\r\nimport de.kp.net.rtp.RtpSocket;\r\nimport de.kp.net.rtsp.RtspConstants;\r\nimport de.kp.net.rtsp.RtspConstants.VideoEncoder;\r\nimport de.kp.net.rtsp.server.response.Parser;\r\nimport de.kp.net.rtsp.server.response.RtspDescribeResponse;\r\nimport de.kp.net.rtsp.server.response.RtspError;\r\nimport de.kp.net.rtsp.server.response.RtspOptionsResponse;\r\nimport de.kp.net.rtsp.server.response.RtspPauseResponse;\r\nimport de.kp.net.rtsp.server.response.RtspPlayResponse;\r\nimport de.kp.net.rtsp.server.response.RtspResponse;\r\nimport de.kp.net.rtsp.server.response.RtspResponseTeardown;\r\nimport de.kp.net.rtsp.server.response.RtspSetupResponse;\r\n\r\n/**\r\n * This class describes a RTSP streaming\r\n * server for Android platforms. 
RTSP is\r\n * used to control video streaming from\r\n * a remote user agent.\r\n * \r\n * @author Stefan Krusche (krusche@dr-kruscheundpartner.de)\r\n *\r\n */\r\n\r\npublic class RtspServer implements Runnable {\r\n\r\n\t// reference to the server socket\r\n\tprivate ServerSocket serverSocket;\r\n\t\r\n\t// indicator to determine whether the server has stopped or not\r\n\tprivate boolean stopped = false;\r\n\r\n\t// inidicator to describe whether the server all of its threads\r\n\t// are terminated\r\n\tprivate boolean terminated = false;\r\n\t\r\n\t// reference to the video encoder (H263, H264) used over RTP \r\n\tprivate VideoEncoder encoder;\r\n\r\n\t// a temporary cache to manage all threads initiated by the RTSP server\r\n\tprivate Vector<Thread> serverThreads;\r\n\t\r\n\tpublic RtspServer(int port, VideoEncoder encoder) throws IOException {\t\t\r\n\t\r\n\t\tthis.serverThreads = new Vector<Thread>();\r\n\t\t\r\n\t\tthis.encoder = encoder;\r\n\t    this.serverSocket = new ServerSocket(port);\t  \r\n\t\r\n\t}\r\n\r\n\tpublic void run() {\r\n\t    \r\n\t    /*\r\n\t     * In order to communicate with different clients,\r\n\t     * we construct a thread for each client that is\r\n\t     * connected.\r\n\t     */\r\n\t    while (this.stopped == false) {\r\n\t    \t\r\n\t\t\ttry {\r\n\t\t\t\tSocket  clientSocket = this.serverSocket.accept();\r\n\t\t    \tserverThreads.add(new ServerThread(clientSocket, this.encoder));\r\n\r\n\t\t\t} catch (IOException e) {\r\n\t\t\t\te.printStackTrace();\r\n\t\t\t}\r\n\t    \r\n\t    }\r\n\t\t\r\n\t}\r\n\r\n\tpublic boolean isTerminated() {\r\n\t\treturn this.terminated;\r\n\t}\r\n\t\r\n\t/**\r\n\t * This method is used to stop the RTSP server\r\n\t */\r\n\r\n\tpublic void stop() {\r\n\t\t\r\n\t\tthis.stopped = true;\r\n\t\tterminate();\r\n\r\n\t\ttry {\r\n\t\t\tthis.serverSocket.close();\r\n\t\t\r\n\t\t} catch (IOException e) {\t\r\n\t\t\t// nothing todo\r\n\t\t}\r\n\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * This method is used to tear 
down all threads that have\r\n\t * been invoked by the RTSP server during life time\r\n\t */\r\n\tprivate void terminate() {\r\n\t\t\r\n\t\tfor (Thread serverThread:serverThreads) {\r\n\t\t\tif (serverThread.isAlive()) serverThread.interrupt();\r\n\t\t}\r\n\t\t\r\n\t\tthis.terminated = true;\r\n\t\t\r\n\t}\r\n\t\r\n\tprivate class ServerThread extends Thread {\r\n\t\t\r\n\t\tprivate String TAG = \"RtspServer\";\r\n\r\n\t\t// response to RTSP client\r\n\t\tprivate RtspResponse rtspResponse;\r\n\t\t\r\n\t\tprivate String contentBase = \"\";\r\n\t\t\r\n\t\t/*\r\n\t\t * input and output stream buffer for TCP connection; \r\n\t\t * UDP response are sent through DatagramSocket\r\n\t\t */\r\n\t\tprivate BufferedReader rtspBufferedReader;\r\n\t\tprivate BufferedWriter rtspBufferedWriter;\r\n\r\n\t\tprivate int rtspState;\r\n\t\t\r\n\t\t// Sequence number of RTSP messages within the session\t\r\n\t\tprivate int cseq = 0;\t\r\n\t\t\r\n\t\tprivate int clientPort;\r\n\t\t\r\n\t\t// remote (client) address\r\n\t\tprivate InetAddress clientAddress;\r\n\t\t\r\n\t\t/*\r\n\t\t * This datagram socket is used to send UDP\r\n\t\t * packets to the clientIPAddress\r\n\t\t */\r\n\t\tprivate RtpSocket rtpSocket;\r\n\r\n\t\tprivate final Socket clientSocket;\r\n\r\n\t\tprivate VideoEncoder encoder;\r\n\r\n\t    public ServerThread(Socket socket, VideoEncoder encoder) {\r\n\t    \t\r\n\t    \tthis.clientSocket = socket;\r\n\t    \tthis.encoder = encoder;\r\n\t    \t\r\n\t    \t// register IP address of requesting client\r\n\t    \tthis.clientAddress = this.clientSocket.getInetAddress();\r\n\t    \t\r\n\t    \tstart();\r\n\t    \r\n\t    }\r\n\t    \r\n\t    public void run() {\r\n\t    \t\r\n\t    \t// prepare server response\r\n\t    \tString response = \"\";\r\n\t    \t\r\n\t    \ttry {\r\n\r\n\t    \t\t// Set input and output stream filters\r\n\t    \t\trtspBufferedReader = new BufferedReader(new InputStreamReader(this.clientSocket.getInputStream()) );\r\n\t    \t\trtspBufferedWriter = 
new BufferedWriter(new OutputStreamWriter(this.clientSocket.getOutputStream()) );\r\n\r\n\t    \t\tboolean setup = false;\r\n\t    \t\t \r\n\t    \t\twhile (setup == false) {\r\n\t    \t\t\t\r\n\t    \t\t\t// determine request type and also provide\r\n\t    \t\t\t// server response\r\n\t    \t\t\tint requestType = getRequestType();\r\n\r\n\t    \t\t\t// send response\r\n\t    \t\t\tresponse = rtspResponse.toString();\r\n\r\n\t    \t\t\trtspBufferedWriter.write(response);\r\n\t\t    \t\trtspBufferedWriter.flush();\r\n\r\n\t    \t\t\tif (requestType == RtspConstants.SETUP) {\r\n\t    \t\t\t    \r\n\t    \t\t\t\tsetup = true;\r\n\r\n\t    \t\t\t    // update RTSP state\r\n\t    \t\t\t    rtspState = RtspConstants.READY;\r\n\t    \t\t\t\t\r\n\t    \t\t\t\t// in case of a setup request, we create a new RtpSocket \r\n\t    \t\t\t\t// instance used to send RtpPacket\r\n\t    \t\t\t\tthis.rtpSocket = new RtpSocket(this.clientAddress, this.clientPort);\r\n\t    \t\t\t\t\r\n\t    \t\t\t\t// this RTP socket is registered as RTP receiver to also\r\n\t    \t\t\t\t// receive the streaming video of this device\r\n\t    \t\t\t\tRtpSender.getInstance().addReceiver(this.rtpSocket);\r\n\r\n\t    \t\t\t}\r\n\t    \t\t\t\r\n\t    \t\t}\r\n\r\n\t    \t\t// this is an endless loop, that is terminated an\r\n\t    \t\t// with interrupt sent to the respective thread\r\n\r\n\t    \t\twhile (true) {\r\n\r\n\t    \t\t\t// pares incoming request to decide how to proceed\r\n\t    \t\t\tint requestType = getRequestType();\r\n\r\n\t    \t\t\t// send response\r\n\t    \t\t\tresponse = rtspResponse.toString();\r\n\t    \t\t\t\r\n\t    \t\t\trtspBufferedWriter.write(response);\r\n\t\t    \t\trtspBufferedWriter.flush();\r\n\t    \t\t\t\r\n\t    \t\t\tif ((requestType == RtspConstants.PLAY) && (rtspState == RtspConstants.READY)) {\t    \t\t\t\t\r\n\t\t    \t\t\tLog.i(TAG, \"request: PLAY\");\r\n\r\n\t    \t\t\t\t// make sure that the respective client socket is \r\n\t    \t\t\t\t// ready to send RTP 
packets\r\n\t    \t\t\t\tthis.rtpSocket.suspend(false);\r\n\t    \t\t\t\t\r\n\t    \t\t\t\tthis.rtspState = RtspConstants.PLAYING;\r\n\t    \t\t\t\t\r\n\t    \t\t\t} else if ((requestType == RtspConstants.PAUSE) && (rtspState == RtspConstants.PLAYING)) {\r\n\t\t    \t\t\tLog.i(TAG, \"request: PAUSE\");\r\n\t    \t\t\t\t\r\n\t    \t\t\t\t// suspend RTP socket from sending video packets\r\n\t    \t\t\t\tthis.rtpSocket.suspend(true);\r\n\t    \t\t\t\t\r\n\t    \t\t\t} else if (requestType == RtspConstants.TEARDOWN) {\r\n\t\t    \t\t\tLog.i(TAG, \"request: TEARDOWN\");\r\n\r\n\t    \t\t\t\t// this RTP socket is removed from the RTP Sender\r\n\t    \t\t\t\tRtpSender.getInstance().removeReceiver(this.rtpSocket);\r\n\t    \t\t\t\t\r\n\t    \t\t\t\t// close the clienr socket for receiving incoming RTSP request\r\n\t    \t\t\t\tthis.clientSocket.close();\r\n\t    \t\t\t\t\r\n\t    \t\t\t\t// close the associated RTP socket for sending RTP packets\r\n\t    \t\t\t\tthis.rtpSocket.close();\r\n\t    \t\t\t\t\r\n\t    \t\t\t}\r\n\r\n\t    \t\t\t// the pattern below enables an interrupt\r\n\t    \t\t\t// which allows to close this thread\r\n\t    \t\t\ttry {\r\n\t    \t\t\t\tsleep(20);\r\n\r\n\t    \t\t\t} catch (InterruptedException e) {\r\n\t    \t\t\t\tbreak;\r\n\t    \t\t\t}\r\n\t    \t\t\t\r\n\t    \t\t}\r\n\t      \r\n\t    \t} catch(Throwable t) {\r\n\t    \t\tt.printStackTrace();\r\n\t    \t\t\r\n\t    \t\tSystem.out.println(\"Caught \" + t + \" - closing thread\");\r\n\t      \r\n\t    \t}\r\n\t    }\r\n\t    \r\n\t    private int getRequestType() throws Exception {\r\n\t  \t\r\n\t    \tint requestType = -1;\r\n\r\n\t    \t// retrieve the request in a string representation \r\n\t    \t// for later evaluation\r\n\t    \tString requestLine = \"\";\r\n            try {\r\n            \trequestLine = Parser.readRequest(rtspBufferedReader);\r\n        \r\n            } catch (IOException e) {\r\n                e.printStackTrace();\r\n        \r\n            }\r\n            
\r\n            Log.i(TAG, \"requestLine: \" + requestLine);\r\n            \r\n            // determine request type from incoming RTSP request\r\n            requestType = Parser.getRequestType(requestLine);\r\n\r\n            if (contentBase.isEmpty()) {\r\n                contentBase = Parser.getContentBase(requestLine);\r\n            }\r\n\r\n            if (!requestLine.isEmpty()) {\r\n                cseq = Parser.getCseq(requestLine);\r\n            }\r\n\r\n            if (requestType == RtspConstants.OPTIONS) {\r\n        \t\trtspResponse = new RtspOptionsResponse(cseq);\r\n\r\n\r\n            } else if (requestType == RtspConstants.DESCRIBE) {\r\n                buildDescribeResponse(requestLine);\r\n\r\n            } else if (requestType == RtspConstants.SETUP) {\r\n                buildSetupResponse(requestLine);\r\n\t\t                \r\n            } else if (requestType == RtspConstants.PAUSE) {\r\n                rtspResponse = new RtspPauseResponse(cseq);\r\n\t\t\r\n            } else if (requestType == RtspConstants.TEARDOWN) {\r\n                rtspResponse = new RtspResponseTeardown(cseq);\r\n \r\n            } else if (requestType == RtspConstants.PLAY) {\r\n                rtspResponse = new RtspPlayResponse(cseq);\t       \r\n                \r\n                String range = Parser.getRangePlay(requestLine);\r\n                if (range != null) ((RtspPlayResponse) rtspResponse).setRange(range);\r\n\r\n            } else {\r\n\t        \tif( requestLine.isEmpty()){\r\n\t        \t\trtspResponse = new RtspError(cseq);\r\n            \r\n\t        \t} else {\r\n                    rtspResponse = new RtspError(cseq);\r\n\t        \t}\r\n         \r\n            }\r\n\r\n            return requestType;\r\n\t    \r\n\t    }\r\n\r\n\t    /**\r\n\t     * Create an RTSP response for an incoming SETUP request.\r\n\t     * \r\n\t     * @param requestLine\r\n\t     * @throws Exception\r\n\t     */\r\n\t    private void buildSetupResponse(String 
requestLine) throws Exception {\r\n\t        \r\n\t    \trtspResponse = new RtspSetupResponse(cseq);\r\n\t        \r\n\t    \t// client port\r\n\t    \tclientPort = Parser.getClientPort(requestLine);\t            \r\n\t    \t((RtspSetupResponse) rtspResponse).setClientPort(clientPort);\r\n\t    \t\r\n\t    \t// transport protocol\r\n            ((RtspSetupResponse) rtspResponse).setTransportProtocol(Parser.getTransportProtocol(requestLine));\r\n            \r\n            // session type\r\n            ((RtspSetupResponse) rtspResponse).setSessionType(Parser.getSessionType(requestLine));\r\n\r\n            ((RtspSetupResponse) rtspResponse).setClientIP(this.clientAddress.getHostAddress());\r\n\t            \r\n            int[] interleaved = Parser.getInterleavedSetup(requestLine);\r\n            if(interleaved != null){\r\n                ((RtspSetupResponse) rtspResponse).setInterleaved(interleaved);\r\n            }\r\n\r\n\t    }\r\n\r\n\t    /**\r\n\t     * Create an RTSP response for an incoming DESCRIBE request.\r\n\t     * \r\n\t     * @param requestLine\r\n\t     * @throws Exception\r\n\t     */\r\n\t    private void buildDescribeResponse(String requestLine) throws Exception{\r\n                \r\n    \t   rtspResponse = new RtspDescribeResponse(cseq);\r\n           \r\n    \t   // set file name\r\n    \t   String fileName = Parser.getFileName(requestLine);\r\n    \t   ((RtspDescribeResponse) rtspResponse).setFileName(fileName);\r\n        \t   \r\n    \t   // set video encoding\r\n    \t   ((RtspDescribeResponse) rtspResponse).setVideoEncoder(encoder);\r\n\r\n    \t   // finally set content base\r\n    \t   ((RtspDescribeResponse)rtspResponse).setContentBase(contentBase);\r\n        \r\n       }\r\n\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/Parser.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\nimport java.io.BufferedReader;\r\nimport java.io.IOException;\r\nimport java.net.URI;\r\nimport java.util.StringTokenizer;\r\n\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\n/**\r\n * This class provides a parser for incoming RTSP\r\n * messages and splits them into appropriate parts.\r\n * \r\n * @author Stefan Krusche (krusche@dr-kruscheundpartner.de)\r\n *\r\n */\r\npublic class Parser {\r\n        \r\n    /**\r\n     * @param rtspBufferedReader\r\n     * @return\r\n     * @throws IOException\r\n     */\r\n    public static String readRequest(BufferedReader rtspBufferedReader) throws IOException {\r\n \r\n    \tString request = new String();\r\n\r\n    \tboolean endFound = false;\r\n    \tint c;\r\n\r\n    \twhile ((c = rtspBufferedReader.read()) != -1) {\r\n        \r\n    \t\trequest += (char) c;\r\n    \t\tif (c == '\\n') {\r\n            \r\n    \t\t\tif (endFound) {\r\n    \t\t\t\tbreak;\r\n\t \r\n    \t\t\t} else {\r\n    \t\t\t\tendFound = true;\r\n    \t\t\t}\r\n\r\n    \t\t} else {\r\n    \t\t\tif (c != '\\r') {\r\n    \t\t\t\tendFound = false;\r\n    \t\t\t}\r\n     \r\n    \t\t}\r\n\r\n    \t}\r\n\r\n    \treturn request;\r\n    \r\n    }\r\n\r\n    /**\r\n     * This method determines the request type of an\r\n     * incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     */\r\n    public static int getRequestType(String request) {\r\n\r\n    \tStringTokenizer tokens = new StringTokenizer(request);\r\n        String requestType = \"\";\r\n\r\n        if (tokens.hasMoreTokens()) {\r\n\t        requestType = tokens.nextToken();\r\n        }\r\n\r\n        if ((new String(requestType)).compareTo(\"OPTIONS\") == 0)\r\n        \treturn RtspConstants.OPTIONS;\r\n            \r\n        else if ((new String(requestType)).compareTo(\"DESCRIBE\") == 0)\r\n            return RtspConstants.DESCRIBE;\r\n            \r\n        else if ((new 
String(requestType)).compareTo(\"SETUP\") == 0)\r\n            return RtspConstants.SETUP;\r\n            \r\n        else if ((new String(requestType)).compareTo(\"PLAY\") == 0)\r\n            return RtspConstants.PLAY;\r\n            \r\n        else if ((new String(requestType)).compareTo(\"PAUSE\") == 0)\r\n            return RtspConstants.PAUSE;\r\n            \r\n        else if ((new String(requestType)).compareTo(\"TEARDOWN\") == 0)\r\n            return RtspConstants.TEARDOWN;\r\n\r\n        return -1;\r\n    \r\n    }\r\n\r\n    /**\r\n     * @param request\r\n     * @return\r\n     */\r\n    public static String getContentBase(String request) {\r\n\r\n    \tStringTokenizer tokens = new StringTokenizer(request);\r\n        String contentBase = \"\";\r\n        \r\n        if (tokens.hasMoreTokens()) {\r\n        \tcontentBase = tokens.nextToken();\r\n            contentBase = tokens.nextToken();\r\n        }\r\n\r\n        return contentBase;\r\n    \r\n    }\r\n\r\n    /**\r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static int getCseq(String request) throws Exception {\r\n        \r\n    \tString ineInput = getLineInput(request, \"\\r\\n\", \"CSeq\");\r\n        String cseq = ineInput.substring(6);\r\n            \r\n        return Integer.parseInt(cseq);\r\n    \r\n    }\r\n\r\n    /**\r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static int[] getInterleavedSetup(String request) throws Exception {\r\n\r\n    \tint[] interleaved = null;\r\n            \r\n    \tString lineInput = getLineInput(request, \"\\r\\n\", \"Transport:\");\r\n        String[] parts = lineInput.split(\"interleaved=\");\r\n\r\n        int t = parts.length;\r\n        if (t > 1) {\r\n        \t\r\n            parts = parts[1].split(\"-\");\r\n            interleaved = new int[2];\r\n            \r\n            interleaved[0] = Integer.parseInt(parts[0]);\r\n            interleaved[1] 
= Integer.parseInt(parts[1]);\r\n        }\r\n\r\n        return interleaved;\r\n\r\n    }\r\n\r\n    /**\r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static String getFileName(String request) throws Exception {\r\n                \r\n    \tString lineInput = getLineInput(request, \" \", \"rtsp\");\r\n    \tURI uri = new URI(lineInput);\r\n    \t\r\n    \t//String[] parts = lineInput.split(\"rtsp://\" + RtspConstants.SERVER_IP + \"/\");        \r\n        //String fileName = parts[1];\r\n        \r\n    \tString fileName = uri.getPath();\r\n        return fileName;\r\n           \r\n    }\r\n\r\n    /**\r\n     * This method retrieves a certain input from an\r\n     * incoming RTSP request, described by a separator\r\n     * and a specific prefix.\r\n     * \r\n     * @param request\r\n     * @param separator\r\n     * @param prefix\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static String getLineInput(String request, String separator, String prefix) throws Exception {\r\n            \r\n    \tStringTokenizer str = new StringTokenizer(request, separator);\r\n        String token = null;\r\n\r\n        boolean match = false;\r\n        \r\n        while (str.hasMoreTokens()) {\r\n            token = str.nextToken();\r\n            if (token.startsWith(prefix)) {            \t\r\n            \tmatch = true;\r\n                break;\r\n            }\r\n        }\r\n\r\n        return (match == true) ? 
token : null;\r\n    \r\n    }\r\n\r\n    /**\r\n     * This method retrieves the client port\r\n     * from an incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static int getClientPort(String request) throws Exception {\r\n\r\n    \tString lineInput = getLineInput(request, \"\\r\\n\", \"Transport:\");\r\n    \tif (lineInput == null) throw new Exception();\r\n            \r\n    \tString[] parts = lineInput.split(\";\");\r\n        parts[2] = parts[2].substring(12);\r\n            \r\n        String[] ports = parts[2].split(\"-\");\r\n        return Integer.parseInt(ports[0]);\r\n    \r\n    }\r\n \r\n    /**\r\n     * This method retrieves the transport protocol\r\n     * from an incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static String getTransportProtocol(String request) throws Exception {\r\n    \t\r\n        String lineInput = getLineInput(request, \"\\r\\n\", \"Transport:\");\r\n    \tif (lineInput == null) throw new Exception();\r\n        \r\n        String[] parts = lineInput.split(\";\");\r\n        parts[0] = parts[0].substring(11);\r\n        \r\n        return parts[0];\r\n    \r\n    }\r\n\r\n    /**\r\n     * This method retrieves the range from an\r\n     * incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static String getRangePlay(String request) throws Exception {\r\n\r\n    \tString lineInput = getLineInput(request, \"\\r\\n\", \"Range:\");\r\n    \tif (lineInput == null) {\r\n    \t\t\r\n    \t\t/* \r\n    \t\t * Android's video view does not provide\r\n    \t\t * range information with a PLAY request\r\n    \t\t */\r\n    \t\t\r\n    \t\treturn null;\r\n    \t\t\r\n    \t}\r\n        \r\n    \tString[] parts = lineInput.split(\"=\");\r\n        return parts[1];\r\n    \r\n    }\r\n\r\n    /**\r\n     
* \r\n     * This method determines the session type from an\r\n     * incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public static String getSessionType(String request) throws Exception {\r\n        \r\n    \tString lineInput = getLineInput(request, \"\\r\\n\", \"Transport:\");\r\n    \tif (lineInput == null) throw new Exception();\r\n        \r\n    \tString[] parts = lineInput.split(\";\");\r\n        return parts[1].trim();\r\n    \r\n    }\r\n        \r\n    /**\r\n     * This method retrieves the user agent from an\r\n     * incoming RTSP request.\r\n     * \r\n     * @param request\r\n     * @return\r\n     * @throws Exception\r\n     */\r\n    public String getUserAgent(String request) throws Exception{\r\n        \r\n    \tString lineInput = getLineInput(request, \"\\r\\n\", \"User-Agent:\");\r\n    \tif (lineInput == null) throw new Exception();\r\n        \r\n    \tString[] parts = lineInput.split(\":\");\r\n        return parts[1];\r\n    \r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspAnnounceResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspAnnounceResponse extends RtspResponse {\r\n\r\n    public RtspAnnounceResponse(int cseq) {           \r\n    \tsuper(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspDescribeResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\nimport java.net.UnknownHostException;\r\n\r\nimport de.kp.net.rtsp.RtspConstants.VideoEncoder;\r\n\r\npublic class RtspDescribeResponse extends RtspResponse {\r\n\r\n    protected String rtpSession  = \"\";\r\n    protected String contentBase = \"\";\r\n\r\n    private String fileName;\r\n    private VideoEncoder encoder;\r\n    \r\n    public RtspDescribeResponse(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\r\n            \r\n    \tSDP sdp = new SDP(fileName, encoder);\r\n\r\n        String sdpContent = \"\";\r\n        try {\r\n            sdpContent = CRLF2 + sdp.getSdp();\r\n            \r\n        } catch (UnknownHostException e) {\r\n            e.printStackTrace();\r\n        }\r\n        \r\n        body += \"Content-base: \"+contentBase + CRLF\r\n        + \"Content-Type: application/sdp\"+ CRLF \r\n        + \"Content-Length: \"+ sdpContent.length() + sdpContent;\r\n\r\n    }\r\n\r\n    public String getContentBase() {\r\n        return contentBase;\r\n    }\r\n\r\n    public void setContentBase(String contentBase) {\r\n        this.contentBase = contentBase;\r\n    }\r\n    \r\n    public void setFileName(String fileName) {\r\n    \tthis.fileName = fileName;\r\n    }\r\n\r\n    public void setVideoEncoder(VideoEncoder encoder) {\r\n    \tthis.encoder = encoder;\r\n    }\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspError.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspError extends RtspResponse {\r\n\r\n    public RtspError(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspOptionsResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspOptionsResponse extends RtspResponse {\r\n\r\n    public RtspOptionsResponse(int cseq) {\r\n        super(cseq);\r\n    }\r\n    \r\n    protected void generateBody() {\r\n        this.body = \"Public:DESCRIBE,SETUP,TEARDOWN,PLAY,PAUSE\"/*+SL*/;\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspPauseResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspPauseResponse extends RtspResponse {\r\n\r\n    public RtspPauseResponse(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspPlayResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspPlayResponse extends RtspResponse {\r\n\r\n    protected String range = \"\";\r\n   \r\n    public RtspPlayResponse(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\t\r\n    \tthis.body += \"Session: \" + session_id + CRLF + \"Range: npt=\" + range;\r\n    }\r\n\r\n    public String getRange() {\r\n        return range;\r\n    }\r\n\r\n    public void setRange(String range) {\r\n        this.range = range;\r\n    }\r\n      \r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\nimport java.util.Date;\r\n\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\npublic abstract class RtspResponse {\r\n\r\n    protected String response=\"\";\r\n       \r\n    protected int cseq = 0;\r\n       \r\n   \tprotected static int session_id = -1;       \r\n   \tprotected boolean newSessionId = true;\r\n       \r\n\tprotected String body = \"\";\r\n       \r\n    /**\r\n     * CR = <US-ASCII CR, carriage return (13)>\r\n     * LF = <US-ASCII LF, linefeed (10)>\r\n     * CRLF = CR LF\r\n     */\r\n    public static final String CRLF  = \"\\r\\n\";\r\n    public static final String CRLF2 = \"\\r\\n\\r\\n\";\r\n    public static final String SEP   = \" \";\r\n   \r\n    public RtspResponse(int cseq){\r\n            this.cseq = cseq;\r\n    }\r\n       \r\n    protected String getHeader() {\r\n\r\n    \tStringBuffer sb = new StringBuffer();\r\n    \t\r\n    \tsb.append(\"RTSP/1.0\" + SEP + \"200\" + SEP + \"OK\" + CRLF);\r\n    \tsb.append(cseq() +CRLF);\r\n    \t\r\n    \tsb.append(\"Date: \" + new Date().toGMTString() + CRLF);\r\n    \tsb.append(\"Server: \" + getServer() + CRLF);\r\n\r\n    \treturn sb.toString();\r\n    \t\r\n    }\r\n       \r\n\tprotected String cseq() {\r\n        return \"CSeq:\" + SEP + getCseq();\r\n\t}\r\n   \r\n    protected String getResponse() {\r\n        return response;\r\n    }\r\n\r\n    protected void setResponse(String response) {\r\n        this.response = response;\r\n    }\r\n\r\n    protected String getServer(){\r\n        return RtspConstants.SERVER_NAME + \"/\" + RtspConstants.SERVER_VERSION;\r\n    }\r\n       \r\n    protected int getCseq() {\r\n        return cseq;\r\n    }\r\n\r\n    protected void setCseq(int cseq) {\r\n        this.cseq = cseq;\r\n    }\r\n\r\n    protected String getBody() {\r\n        return body;\r\n    }\r\n\r\n    protected void setBody(String cuerpo) {\r\n        this.body = cuerpo;\r\n    }\r\n\r\n    protected void 
generate(){\r\n    \r\n    \t// note that it is important to close the response\r\n    \t// message with 2 CRLFs\r\n    \tresponse += getHeader();\r\n        response += getBody() + CRLF2;\r\n\r\n    }\r\n       \r\n    protected abstract void generateBody();\r\n       \r\n    public String toString() {\r\n            \r\n    \tgenerateBody();            \r\n    \tgenerate();\r\n  \r\n    \treturn response;\r\n    \r\n    }\r\n       \r\n    public void createSessionId(boolean bool){\r\n        newSessionId = bool;\r\n    }\r\n\r\n}\r\n\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspResponseTeardown.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\npublic class RtspResponseTeardown extends RtspResponse {\r\n\r\n    public RtspResponseTeardown(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generar(){\r\n        response += getHeader();\r\n        response += getBody() + CRLF;\r\n    }\r\n    \r\n    protected void generateBody() {\r\n        body += \"\";\r\n    }\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/RtspSetupResponse.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\nimport java.util.Random;\r\n\r\nimport de.kp.net.rtsp.RtspConstants;\r\n\r\npublic class RtspSetupResponse extends RtspResponse {\r\n\r\n    private int clientRTP, clientRTCP;\r\n        \r\n    private String clientIP = \"\";\r\n        \r\n    private int[] interleaved;        \r\n    private String transportProtocol = \"\";\r\n        \r\n    private String sessionType = \"\";\r\n\r\n    public RtspSetupResponse(int cseq) {\r\n        super(cseq);\r\n    }\r\n\r\n    protected void generateBody() {\r\n\r\n    \tcreateSessionId();\r\n\r\n    \tbody += \"Session: \" + session_id + CRLF + \"Transport: \" + transportProtocol + \";\" + sessionType + \";\";\r\n        if (interleaved==null) {\r\n            body += \"source=\" + RtspConstants.SERVER_IP + \";\" + getPortPart();\r\n        \r\n        } else {\r\n            body += getInterleavedPart();\r\n        }\r\n\r\n    }\r\n        \r\n    private String getPortPart(){\r\n        \r\n    \tString r= \"client_port=\" + clientRTP + \"-\" + clientRTCP + \";\" + \"server_port=\" + RtspConstants.PORTS_RTSP_RTP[0] + \"-\" + RtspConstants.PORTS_RTSP_RTP[1];\r\n\t    return r;\r\n\r\n    }\r\n        \r\n    private String getInterleavedPart() {\r\n        return \"client_ip=\" + clientIP + \";interleaved=\" + interleaved[0] + \"-\" + interleaved[1];\r\n    }\r\n\r\n    private final void createSessionId() {\r\n \r\n    \tRandom r = new Random();\r\n        int id = r.nextInt();\r\n        \r\n        if (id < 0) {\r\n            id *= -1;\r\n        }\r\n        \r\n        if (newSessionId) {\r\n            session_id = id;\r\n        }\r\n    \r\n    }\r\n\r\n    public void setClientPort(int port) {\r\n        clientRTP  = port;\r\n        clientRTCP = port + 1;\r\n    }\r\n\r\n    public String getTransportProtocol() {\r\n        return transportProtocol;\r\n    }\r\n\r\n   public void setTransportProtocol(String transportProtocol) {\r\n        
this.transportProtocol = transportProtocol;\r\n    }\r\n\r\n    public String getSessionType() {\r\n        return sessionType;\r\n    }\r\n\r\n    public void setSessionType(String sessionType) {\r\n        this.sessionType = sessionType;\r\n    }\r\n\r\n     public int[] getInterleaved() {\r\n            return interleaved;\r\n    }\r\n\r\n    public void setInterleaved(int[] interleaved) {\r\n        this.interleaved = interleaved;\r\n    }\r\n\r\n    public String getClientIP() {\r\n        return clientIP;\r\n    }\r\n\r\n    public void setClientIP(String clientIP) {\r\n        this.clientIP = clientIP;\r\n    }\r\n        \r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/net/rtsp/server/response/SDP.java",
    "content": "package de.kp.net.rtsp.server.response;\r\n\r\nimport java.net.UnknownHostException;\r\n\r\nimport com.orangelabs.rcs.core.ims.protocol.rtp.codec.video.h264.H264Config;\r\n\r\nimport de.kp.net.rtsp.RtspConstants;\r\nimport de.kp.net.rtsp.RtspConstants.VideoEncoder;\r\n\r\npublic class SDP {\r\n\r\n\t// the default file name\r\n\tprivate String fileName = \"kupdroid\";\r\n\t\r\n\tprivate int audioClientPort = RtspConstants.CLIENT_AUDIO_PORT;\r\n\tprivate int clientVideoPort = RtspConstants.CLIENT_VIDEO_PORT;\r\n\r\n\tprivate VideoEncoder encoder;\r\n\r\n\tpublic SDP(String fileName, VideoEncoder encoder) {\r\n\r\n\t\tthis.fileName = fileName;\r\n\t\tthis.encoder  = encoder;\r\n\t\r\n\t}\r\n\t\r\n\t/**\r\n\t * This method is used to build a minimal\r\n\t * SDP file description.\r\n\t * \r\n\t * @return\r\n\t * @throws UnknownHostException\r\n\t */\r\n\tpublic String getSdp() throws UnknownHostException {\r\n\r\n\t\tStringBuffer buf = new StringBuffer();\r\n\t\t\r\n\t\tbuf.append(\"v=0\" + RtspResponse.CRLF);\r\n\t\t// filename contains leading slash\r\n\t\tbuf.append(\"s=\"  + fileName.substring(1) + RtspResponse.CRLF);\r\n\t\t\r\n\t\tint track = 1;\r\n\t\tbuf.append(getSDPVideo(track));\r\n\r\n\t\treturn buf.toString();\r\n\t}\r\n\t\r\n\t/*\r\n\tprivate StringBuffer getSDPAudio(){\r\n\t\t\r\n\t\tStringBuffer buf = new StringBuffer();\r\n\t\t\r\n\t\t//m=<media> <port> <proto> <fmt>\r\n\t\tbuf.append(\"m=audio \" + audioClientPort + \" RTP/AVP 14\" + RtspResponse.CRLF);\r\n\t\t//a=rtpmap:<payload type> <encoding name>/<clock rate> [/<encoding parameters>]\r\n\t\t\r\n\t\tbuf.append(\"a=rtpmap:14 MPA/90000\" + RtspResponse.CRLF);\r\n\t\tbuf.append(\"a=control:rtsp://\" + RtspConstants.SERVER_IP + \"/audio\" + RtspResponse.CRLF);\r\n\t\t\r\n\t\tbuf.append(\"a=mimetype: audio/MPA\" + RtspResponse.CRLF);\r\n\t\tbuf.append(\"a=range:npt=0-\");\r\n\t\t\r\n\t\treturn buf;\r\n\t\r\n\t}\r\n\t*/\r\n\t\r\n\tprivate StringBuffer getSDPVideo(int 
track){\r\n\t\t\r\n\t\tStringBuffer sb = new StringBuffer();\r\n\t\t\t\t\r\n\t\t// H263 encoding\r\n\t\tif (encoder.equals(VideoEncoder.H263_ENCODER)) {\r\n\t\t\t// cross encoder properties\r\n\t\t\tsb.append(\"m=video \" + clientVideoPort + RtspConstants.SEP + \"RTP/AVP \" + RtspConstants.RTP_H263_PAYLOADTYPE + RtspResponse.CRLF);\t\r\n\t\t\t// set to H263-2000\r\n\t\t\tsb.append(\"a=rtpmap:\" + RtspConstants.RTP_H263_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.H263_2000 + RtspResponse.CRLF);\r\n\r\n\t\t\t// additional information for android video view, due to extended checking mechanism\r\n\t\t\tsb.append(\"a=framesize:\" + RtspConstants.RTP_H263_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.WIDTH + \"-\" + RtspConstants.HEIGHT + RtspResponse.CRLF);\r\n\r\n\t\t} else if (encoder.equals(VideoEncoder.H264_ENCODER)) {\r\n\t\t\t// cross encoder properties\r\n\t\t\tsb.append(\"m=video \" + clientVideoPort + RtspConstants.SEP + \"RTP/AVP \" + RtspConstants.RTP_H264_PAYLOADTYPE + RtspResponse.CRLF);\t\r\n\r\n\t\t\tsb.append(\"a=rtpmap:\" + RtspConstants.RTP_H264_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.H264 + RtspResponse.CRLF);\r\n\r\n\t\t\t\r\n\t\t\t/*\r\n\t\t\t * with change to in-band SPS/PPS parameters following SDP statements should be unnecessary \r\n\t\t\t */\r\n\t\t\t// 176x144 15fps\r\n\t\t\t//sb.append(\"a=fmtp:\" + RtspConstants.RTP_H264_PAYLOADTYPE + \" packetization-mode=0;\" + H264Config.CODEC_PARAMS +\";sprop-parameter-sets=J0IAINoLExA,KM48gA==;\" + RtspResponse.CRLF); \r\n\t\t\t// 352 288 15fps\r\n//\t\t\tsb.append(\"a=fmtp:\" + RtspConstants.RTP_H264_PAYLOADTYPE + \" packetization-mode=0;\" + H264Config.CODEC_PARAMS +\";sprop-parameter-sets=J0IAINoFglE=,KM48gA==;\" + RtspResponse.CRLF); \r\n\r\n\t\t\t\r\n\t\t\t//buf.append(\"a=fmtp:98 packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAIKaAoD0Q,KM48gA==;\" + RtspResponse.CRLF); // 640x480 20fps\r\n//\t\t\tbuf.append(\"a=fmtp:98 
packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAINoLExA,KM48gA==;\" + RtspResponse.CRLF); // 176x144 15fps\r\n//\t\t\tsb.append(\"a=fmtp:\" + RtspConstants.RTP_H264_PAYLOADTYPE + \" packetization-mode=1;\" + H264Config.CODEC_PARAMS +\";sprop-parameter-sets=J0IAIKaCxMQ=,KM48gA==;\" + RtspResponse.CRLF); // 176x144 20fps\r\n//\t\t\tbuf.append(\"a=fmtp:98 packetization-mode=1;profile-level-id=420020;sprop-parameter-sets=J0IAINoFB8Q=,KM48gA==;\" + RtspResponse.CRLF); // 320x240 10fps\r\n\t\t\t\r\n\t\t\t// additional information for android video view, due to extended checking mechanism\r\n\t\t\tsb.append(\"a=framesize:\" + RtspConstants.RTP_H264_PAYLOADTYPE + RtspConstants.SEP + RtspConstants.WIDTH + \"-\" + RtspConstants.HEIGHT + RtspResponse.CRLF);\r\n\t\t}\r\n\r\n\t\tsb.append(\"a=control:trackID=\" + String.valueOf(track));\r\n\t\treturn sb;\r\n\t\r\n\t}\r\n\t\r\n\t/*\r\n\tprivate StringBuffer getSDPWebcam(){\r\n\t\t\r\n\t\tStringBuffer buf = new StringBuffer();\r\n\t\t\r\n\t\tbuf.append(\"m=video \" + clientVideoPort + \" RTP/AVP 26\" + RtspResponse.CRLF);\r\n\t\tbuf.append(\"a=rtpmap:26 JPEG/90000\"+RtspResponse.CRLF);\r\n\t\t\r\n\t\tbuf.append(\"a=control:rtsp://\" + RtspConstants.SERVER_IP + \"/video\" + RtspResponse.CRLF);\r\n\t\tbuf.append(\"a=mimetype: video/JPEG\" + RtspResponse.CRLF);\r\n\t\t\r\n\t\tbuf.append(\"a=range:npt=0-100\");\r\n\t\t\r\n\t\treturn buf;\r\n\t\r\n\t}\r\n\t*/\r\n\t\r\n\tpublic String getFileName() {\r\n\t\treturn fileName;\r\n\t}\r\n\r\n\t/**\r\n\t * @param fileName\r\n\t */\r\n\tpublic void setFileName(String fileName) {\r\n\t\tthis.fileName = fileName;\r\n\t}\r\n\r\n\tpublic int getClientAudioPort() {\r\n\t\treturn audioClientPort;\r\n\t}\r\n\r\n\tpublic void setClientAudioPort(int clientAudioPort) {\r\n\t\tthis.audioClientPort = clientAudioPort;\r\n\t}\r\n\r\n\tpublic int getClientVideoPort() {\r\n\t\treturn clientVideoPort ;\r\n\t}\r\n\r\n\tpublic void setClientVideoPort(int clientVideoPort) 
{\r\n\t\tthis.clientVideoPort  = clientVideoPort;\r\n\t}\r\n\t\r\n}"
  },
  {
    "path": "RtspCamera/src/de/kp/rtspcamera/MediaConstants.java",
    "content": "package de.kp.rtspcamera;\r\n\r\npublic class MediaConstants {\r\n\r\n\r\n\tpublic static boolean H264_CODEC = true;\r\n\t\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/rtspcamera/RtspApiCodecsCamera.java",
    "content": "package de.kp.rtspcamera;\r\n\r\nimport java.io.IOException;\r\nimport java.io.InputStream;\r\nimport java.net.SocketException;\r\n\r\nimport android.app.Activity;\r\nimport android.media.MediaRecorder;\r\nimport android.net.LocalServerSocket;\r\nimport android.net.LocalSocket;\r\nimport android.net.LocalSocketAddress;\r\nimport android.os.Bundle;\r\nimport android.util.Log;\r\nimport android.view.SurfaceHolder;\r\nimport android.view.SurfaceView;\r\nimport android.view.Window;\r\nimport android.view.WindowManager;\r\nimport de.kp.net.rtp.RtpSender;\r\nimport de.kp.net.rtp.packetizer.AbstractPacketizer;\r\nimport de.kp.net.rtp.packetizer.H263Packetizer;\r\nimport de.kp.net.rtp.packetizer.H264Packetizer;\r\nimport de.kp.net.rtsp.RtspConstants;\r\nimport de.kp.net.rtsp.server.RtspServer;\r\n\r\npublic class RtspApiCodecsCamera extends Activity {\r\n\r\n\tprivate String TAG = \"RTSPCamera\";\r\n\r\n\t// default RTSP command port is 554\r\n\tprivate int SERVER_PORT = 8080;\r\n\r\n\tprivate SurfaceView mVideoPreview;\r\n\tprivate SurfaceHolder mSurfaceHolder;\r\n\r\n\t// these parameters are used to separate between incoming\r\n\t// and outgoing streams\r\n\tprivate LocalServerSocket localSocketServer;\r\n\tprivate LocalSocket receiver;\r\n\tprivate LocalSocket sender;\r\n\r\n\tprivate MediaRecorder mediaRecorder;\r\n\r\n\tprivate boolean mediaRecorderRecording = false;\r\n\tprotected boolean videoQualityHigh = false;\r\n\r\n\tprivate RtpSender rtpSender;\r\n\tprivate RtspServer streamer = null;\r\n\t\r\n\tprivate AbstractPacketizer videoPacketizer;\r\n\r\n\t@Override\r\n\tpublic void onCreate(Bundle savedInstanceState) {\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\r\n\t\tLog.d(TAG, \"onCreate\");\r\n\r\n        requestWindowFeature(Window.FEATURE_NO_TITLE);\r\n        Window win = getWindow();\r\n\r\n        win.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);\t\t\r\n        win.setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, 
WindowManager.LayoutParams.FLAG_FULLSCREEN); \r\n\r\n\t\tsetContentView(R.layout.cameraapicodecs);\r\n\r\n\t\t// hold the reference\r\n\t\trtpSender = RtpSender.getInstance();\r\n\r\n\t\t/*\r\n\t\t * Video preview initialization\r\n\t\t */\r\n\t\tmVideoPreview = (SurfaceView) findViewById(R.id.smallcameraview);\r\n\t\tmSurfaceHolder = mVideoPreview.getHolder();\r\n\t\tmSurfaceHolder.addCallback(surfaceCallback);\r\n\t\tmSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);\r\n\t\t\r\n\t}\r\n\r\n\tpublic void onResume() {\r\n\r\n\t\tLog.d(TAG, \"onResume\");\r\n\r\n\t\t// starts the RTSP Server\r\n\r\n\t\ttry {\r\n\r\n\t\t\t// initialize video encoder to be used\r\n\t\t\t// for SDP file generation\r\n\t\t\tRtspConstants.VideoEncoder rtspVideoEncoder = (MediaConstants.H264_CODEC == true) ? RtspConstants.VideoEncoder.H264_ENCODER\r\n\t\t\t\t\t: RtspConstants.VideoEncoder.H263_ENCODER;\r\n\r\n\t\t\tif (streamer == null) {\r\n\t\t\t\tstreamer = new RtspServer(SERVER_PORT, rtspVideoEncoder);\r\n\t\t\t\tnew Thread(streamer).start();\r\n\t\t\t}\r\n\r\n\t\t\tLog.d(TAG, \"RtspServer started\");\r\n\r\n\t\t} catch (IOException e) {\r\n\t\t\t// TODO Auto-generated catch block\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Camera initialization\r\n\t\t */\r\n\t\treceiver = new LocalSocket();\r\n\t\ttry {\r\n\r\n\t\t\tlocalSocketServer = new LocalServerSocket(\"camera2rtsp\");\r\n\r\n\t\t\t// InputStream the RTPPackets can be built from\r\n\t\t\treceiver.connect(new LocalSocketAddress(\"camera2rtsp\"));\r\n\t\t\treceiver.setReceiveBufferSize(500000);\r\n\t\t\treceiver.setSendBufferSize(500000);\r\n\r\n\t\t\t// FileDescriptor the Camera can send to\r\n\t\t\tsender = localSocketServer.accept();\r\n\t\t\tsender.setReceiveBufferSize(500000);\r\n\t\t\tsender.setSendBufferSize(500000);\r\n\r\n\t\t} catch (IOException e1) 
{\r\n\t\t\te1.printStackTrace();\r\n\t\t\tsuper.onResume();\r\n\t\t\tfinish();\r\n\t\t\treturn;\r\n\t\t}\r\n\r\n\t\tsuper.onResume();\r\n\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void onPause() {\r\n\r\n\t\t// stop RTSP server\r\n\t\tif (streamer != null)\r\n\t\t\tstreamer.stop();\r\n\t\tstreamer = null;\r\n\r\n\t\tsuper.onPause();\r\n\t}\r\n\t\r\n\t/*\r\n\t * MediaRecorder listener\r\n\t */\r\n\tprivate MediaRecorder.OnErrorListener mErrorListener = new MediaRecorder.OnErrorListener() {\r\n\t\t\r\n\t\tpublic void onError(MediaRecorder mr, int what, int extra) {\r\n\t\t\t// MediaRecorder or MediaPlayer error\r\n\t\t\trtpSender.stop();\r\n\t\t}\r\n\t};\r\n\r\n\t/*\r\n\t * SurfaceHolder callback triple\r\n\t */\r\n\tSurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() {\r\n\r\n\t\t/*\r\n\t\t * Created state: - Open camera - initial call to startPreview() - hook\r\n\t\t * PreviewCallback() on it, which notifies waiting thread with new\r\n\t\t * preview data - start thread\r\n\t\t * \r\n\t\t * @see android.view.SurfaceHolder.Callback#surfaceCreated(android.view.\r\n\t\t * SurfaceHolder )\r\n\t\t */\r\n\t\tpublic void surfaceCreated(SurfaceHolder holder) {\r\n\t\t\tLog.d(TAG, \"surfaceCreated\");\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Changed state: - initiate camera preview size, set\r\n\t\t * camera.setPreviewDisplay(holder) - subsequent call to startPreview()\r\n\t\t * \r\n\t\t * @see android.view.SurfaceHolder.Callback#surfaceChanged(android.view.\r\n\t\t * SurfaceHolder , int, int, int)\r\n\t\t */\r\n\t\tpublic void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {\r\n\t\t\tLog.d(TAG, \"surfaceChanged\");\r\n\r\n\t\t\tinitializeVideo();\r\n\t\t\tstartVideoRecording();\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Destroy State: Take care on release of camera\r\n\t\t * \r\n\t\t * @see\r\n\t\t * android.view.SurfaceHolder.Callback#surfaceDestroyed(android.view.\r\n\t\t * SurfaceHolder)\r\n\t\t */\r\n\t\tpublic void surfaceDestroyed(SurfaceHolder holder) 
{\r\n\t\t\tLog.d(TAG, \"surfaceDestroyed\");\r\n\r\n\t\t\tstopVideoRecording();\r\n\r\n\t\t}\r\n\t};\r\n\r\n\t// initializeVideo() starts preview and prepare media recorder.\r\n\t// Returns false if initializeVideo fails\r\n\tprivate void initializeVideo() {\r\n\t\tLog.d(TAG, \"initializeVideo: \" + mediaRecorderRecording);\r\n\r\n\t\tmediaRecorderRecording = true;\r\n\r\n\t\tLog.v(TAG, \"initializeVideo set to true: \" + mediaRecorderRecording);\r\n\r\n\t\tif (mediaRecorder == null)\r\n\t\t\tmediaRecorder = new MediaRecorder();\r\n\t\telse\r\n\t\t\tmediaRecorder.reset();\r\n\r\n\r\n\t\tmediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);\r\n\t\tmediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);\r\n\r\n\t\t// route video to LocalSocket\r\n\t\tmediaRecorder.setOutputFile(sender.getFileDescriptor());\r\n\r\n\t\t// Use the same frame rate for both, since internally\r\n\t\t// if the frame rate is too large, it can cause camera to become\r\n\t\t// unstable. We need to fix the MediaRecorder to disable the support\r\n\t\t// of setting frame rate for now.\r\n\r\n\t\tmediaRecorder.setVideoFrameRate(RtspConstants.FPS);\r\n\t\t// mMediaRecorder.setVideoEncodingBitRate(RtspConstants.BITRATE);\r\n\r\n\t\tmediaRecorder.setVideoSize(Integer.valueOf(RtspConstants.WIDTH), Integer.valueOf(RtspConstants.HEIGHT));\r\n\r\n\t\tmediaRecorder.setVideoEncoder(getMediaEncoder());\r\n\t\tmediaRecorder.setPreviewDisplay(mSurfaceHolder.getSurface());\r\n\r\n\t\ttry {\r\n\r\n\t\t\tmediaRecorder.prepare();\r\n\t\t\tmediaRecorder.setOnErrorListener(mErrorListener);\r\n\t\t\tmediaRecorder.start();\r\n\r\n\t\t} catch (IOException exception) {\r\n\t\t\texception.printStackTrace();\r\n\t\t\treleaseMediaRecorder();\r\n\t\t}\r\n\t}\r\n\r\n\tprivate int getMediaEncoder() {\r\n\t\tif (MediaConstants.H264_CODEC == true)\r\n\t\t\treturn MediaRecorder.VideoEncoder.H264;\r\n\t\treturn MediaRecorder.VideoEncoder.H263;\r\n\t}\r\n\r\n\tprivate void startVideoRecording() 
{\r\n\r\n\t\tLog.v(TAG, \"startVideoRecording\");\r\n\r\n\t\tInputStream fis = null;\r\n\t\ttry {\r\n\t\t\tfis = receiver.getInputStream();\r\n\t\t} catch (IOException e1) {\r\n\t\t\tLog.w(TAG, \"No receiver input stream\");\r\n\t\t\treturn;\r\n\t\t}\r\n\r\n\t\ttry {\r\n\r\n\t\t\t// actually H263 over RTP and H264 over RTP is supported\r\n\t\t\tif (MediaConstants.H264_CODEC == true) {\r\n\t\t\t\tvideoPacketizer = new H264Packetizer(fis);\r\n\t\t\t} else {\r\n\t\t\t\tvideoPacketizer = new H263Packetizer(fis);\r\n\t\t\t}\r\n\t\t\tvideoPacketizer.startStreaming();\r\n\r\n\t\t} catch (SocketException e) {\r\n\t\t\t// TODO Auto-generated catch block\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\r\n\t}\r\n\r\n\t private void stopVideoRecording() {\r\n\r\n\t\t  Log.d(TAG, \"stopVideoRecording\");\r\n\r\n\t\t  if (mediaRecorderRecording || mediaRecorder != null) {\r\n\r\n\t\t   try {\r\n\r\n\t\t    // stop thread\r\n\t\t    videoPacketizer.stopStreaming();\r\n\r\n\t\t    if (mediaRecorderRecording && mediaRecorder != null) {\r\n\t\t     try {\r\n\t\t      mediaRecorder.setOnErrorListener(null);\r\n\t\t      mediaRecorder.setOnInfoListener(null);\r\n\t\t      mediaRecorder.stop();\r\n\t\t     } catch (RuntimeException e) {\r\n\t\t      Log.e(TAG, \"stop fail: \" + e.getMessage());\r\n\t\t     }\r\n\r\n\t\t     mediaRecorderRecording = false;\r\n\t\t    }\r\n\t\t   } catch (Exception e) {\r\n\t\t    Log.e(TAG, \"stopVideoRecording failed\");\r\n\t\t    \r\n\t\t    e.printStackTrace();\r\n\t\t   } finally {\r\n\t\t    releaseMediaRecorder();\r\n\t\t   }\r\n\t\t  }\r\n\t\t }\r\n\tprivate void releaseMediaRecorder() {\r\n\r\n\t\tLog.d(TAG, \"Releasing media recorder.\");\r\n\t\tif (mediaRecorder != null) {\r\n\t\t\tmediaRecorder.reset();\r\n\t\t\tmediaRecorder.release();\r\n\t\t\tmediaRecorder = null;\r\n\t\t}\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspCamera/src/de/kp/rtspcamera/RtspNativeCodecsCamera.java",
    "content": "package de.kp.rtspcamera;\r\n\r\nimport java.io.IOException;\r\n\r\nimport android.app.Activity;\r\nimport android.hardware.Camera;\r\nimport android.os.Bundle;\r\nimport android.util.Log;\r\nimport android.view.SurfaceHolder;\r\nimport android.view.SurfaceView;\r\nimport android.view.Window;\r\nimport android.view.WindowManager;\r\n\r\nimport de.kp.net.rtp.recorder.RtspVideoRecorder;\r\nimport de.kp.net.rtsp.RtspConstants;\r\nimport de.kp.net.rtsp.server.RtspServer;\r\n\r\npublic class RtspNativeCodecsCamera extends Activity {\r\n\r\n\tprivate String TAG = \"RTSPNativeCamera\";\r\n\r\n\t// default RTSP command port is 554\r\n//\tprivate int SERVER_PORT = 8080;\r\n\r\n\tprivate RtspVideoRecorder outgoingPlayer;\r\n\r\n\tprivate SurfaceView mCameraPreview;\r\n\tprivate SurfaceHolder previewHolder;\r\n\r\n\tprivate Camera camera;\r\n\r\n\tprivate boolean inPreview = false;\r\n\tprivate boolean cameraConfigured = false;\r\n\r\n\tprivate int mPreviewWidth = Integer.valueOf(RtspConstants.WIDTH);\r\n\tprivate int mPreviewHeight = Integer.valueOf(RtspConstants.HEIGHT);\r\n\r\n\tprivate RtspServer streamer;\r\n\r\n\t@Override\r\n\tpublic void onCreate(Bundle savedInstanceState) {\r\n\t\tsuper.onCreate(savedInstanceState);\r\n\r\n\t\tLog.d(TAG, \"onCreate\");\r\n\r\n\t\trequestWindowFeature(Window.FEATURE_NO_TITLE);\r\n\t\tWindow win = getWindow();\r\n\t\twin.addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);\r\n\t\twin.setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);\r\n\r\n\t\tsetContentView(R.layout.cameranativecodecs);\r\n\r\n\t\t/*\r\n\t\t * Camera preview initialization\r\n\t\t */\r\n\t\tmCameraPreview = (SurfaceView) findViewById(R.id.smallcameraview);\r\n\t\tpreviewHolder = mCameraPreview.getHolder();\r\n\t\tpreviewHolder.addCallback(surfaceCallback);\r\n\t\tpreviewHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);\r\n\r\n//\t\toutgoingPlayer = new 
RtspVideoRecorder(\"h263-2000\");\r\n\t\toutgoingPlayer = new RtspVideoRecorder(\"h264\");\r\n\t\toutgoingPlayer.open();\r\n\t\t\r\n\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void onResume() {\r\n\t\tLog.d(TAG, \"onResume\");\r\n\t\t\r\n\r\n\t\t// starts the RTSP Server\r\n\r\n\t\ttry {\r\n\r\n\t\t\t// initialize video encoder to be used\r\n\t\t\t// for SDP file generation\r\n\t\t\tRtspConstants.VideoEncoder rtspVideoEncoder = (MediaConstants.H264_CODEC == true) ? RtspConstants.VideoEncoder.H264_ENCODER\r\n\t\t\t\t\t: RtspConstants.VideoEncoder.H263_ENCODER;\r\n\r\n\t\t\tif (streamer == null) {\r\n\t\t\t\tstreamer = new RtspServer(RtspConstants.SERVER_PORT, rtspVideoEncoder);\r\n\t\t\t\tnew Thread(streamer).start();\r\n\t\t\t}\r\n\r\n\t\t\tLog.d(TAG, \"RtspServer started\");\r\n\r\n\t\t} catch (IOException e) {\r\n\t\t\t// TODO Auto-generated catch block\r\n\t\t\te.printStackTrace();\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Camera initialization\r\n\t\t */\r\n\t\tcamera = Camera.open();\r\n\r\n\t\tsuper.onResume();\r\n\r\n\t}\r\n\r\n\t@Override\r\n\tpublic void onPause() {\r\n\r\n\t\t// stop RTSP server\r\n\t\tif (streamer != null)\r\n\t\t\tstreamer.stop();\r\n\t\tstreamer = null;\r\n\r\n\t\tsuper.onPause();\r\n\t}\r\n\r\n\t/*\r\n\t * SurfaceHolder callback triple\r\n\t */\r\n\tSurfaceHolder.Callback surfaceCallback = new SurfaceHolder.Callback() {\r\n\t\t/*\r\n\t\t * Created state: - Open camera - initial call to startPreview() - hook\r\n\t\t * PreviewCallback() on it, which notifies waiting thread with new\r\n\t\t * preview data - start thread\r\n\t\t * \r\n\t\t * @see android.view.SurfaceHolder.Callback#surfaceCreated(android.view.\r\n\t\t * SurfaceHolder )\r\n\t\t */\r\n\t\tpublic void surfaceCreated(SurfaceHolder holder) {\r\n\t\t\tLog.d(TAG, \"surfaceCreated\");\r\n\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Changed state: - initiate camera preview size, set\r\n\t\t * camera.setPreviewDisplay(holder) - subsequent call to startPreview()\r\n\t\t * \r\n\t\t * @see 
android.view.SurfaceHolder.Callback#surfaceChanged(android.view.\r\n\t\t * SurfaceHolder , int, int, int)\r\n\t\t */\r\n\t\tpublic void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {\r\n\t\t\tLog.d(TAG, \"surfaceChanged\");\r\n\t\t\tinitializePreview(w, h);\r\n\t\t\tstartPreview();\r\n\t\t}\r\n\r\n\t\t/*\r\n\t\t * Destroy State: Take care on release of camera\r\n\t\t * \r\n\t\t * @see\r\n\t\t * android.view.SurfaceHolder.Callback#surfaceDestroyed(android.view.\r\n\t\t * SurfaceHolder)\r\n\t\t */\r\n\t\tpublic void surfaceDestroyed(SurfaceHolder holder) {\r\n\t\t\tLog.d(TAG, \"surfaceDestroyed\");\r\n\r\n\t\t\tif (inPreview) {\r\n\t\t\t\tcamera.stopPreview();\r\n\t\t\t}\r\n\t\t\tcamera.setPreviewCallback(null);\r\n\t\t\tcamera.release();\r\n\t\t\tcamera = null;\r\n\t\t\t\r\n\t\t\t// stop captureThread\r\n\t\t\toutgoingPlayer.stop();\r\n\r\n\r\n\t\t\tinPreview = false;\r\n\t\t\tcameraConfigured = false;\r\n\r\n\t\t}\r\n\t};\r\n\r\n\r\n\t/**\r\n\t * This method checks availability of camera and preview\r\n\t * \r\n\t * @param width\r\n\t * @param height\r\n\t */\r\n\tprivate void initializePreview(int width, int height) {\r\n\t\tLog.d(TAG, \"initializePreview\");\r\n\r\n\t\tif (camera != null && previewHolder.getSurface() != null) {\r\n\t\t\ttry {\r\n\t\t\t\t// provide SurfaceView for camera preview\r\n\t\t\t\tcamera.setPreviewDisplay(previewHolder);\r\n\r\n\t\t\t} catch (Throwable t) {\r\n\t\t\t\tLog.e(TAG, \"Exception in setPreviewDisplay()\", t);\r\n\t\t\t}\r\n\r\n\t\t\tif (!cameraConfigured) {\r\n\r\n\t\t\t\tCamera.Parameters parameters = camera.getParameters();\r\n\t\t\t\tparameters.setPreviewSize(mPreviewWidth, mPreviewHeight);\r\n\r\n\t\t\t\tcamera.setParameters(parameters);\r\n\t\t\t\tcameraConfigured = true;\r\n\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\tprivate void startPreview() {\r\n\t\tLog.d(TAG, \"startPreview\");\r\n\r\n\t\tif (cameraConfigured && camera != null) {\r\n\r\n\t\t\t// activate onPreviewFrame()\r\n\t\t\t// 
camera.setPreviewCallback(cameraPreviewCallback);\r\n\t\t\tcamera.setPreviewCallback(outgoingPlayer);\r\n\t\t\t\r\n\t\t\t// start captureThread\r\n\t\t\toutgoingPlayer.start();\r\n\r\n\t\t\tcamera.startPreview();\r\n\t\t\tinPreview = true;\r\n\r\n\t\t}\r\n\t}\r\n\r\n\r\n\t\r\n\tpublic boolean isReady() {\r\n\t\treturn this.inPreview;\r\n\t}\r\n\r\n}\r\n"
  },
  {
    "path": "RtspViewer/.classpath",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<classpath>\r\n\t<classpathentry kind=\"src\" path=\"src\"/>\r\n\t<classpathentry kind=\"src\" path=\"gen\"/>\r\n\t<classpathentry kind=\"con\" path=\"com.android.ide.eclipse.adt.ANDROID_FRAMEWORK\"/>\r\n\t<classpathentry kind=\"con\" path=\"com.android.ide.eclipse.adt.LIBRARIES\"/>\r\n\t<classpathentry kind=\"output\" path=\"bin/classes\"/>\r\n</classpath>\r\n"
  },
  {
    "path": "RtspViewer/.gitignore",
    "content": "/bin\n/gen\n"
  },
  {
    "path": "RtspViewer/.project",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\r\n<projectDescription>\r\n\t<name>RtspViewer</name>\r\n\t<comment></comment>\r\n\t<projects>\r\n\t</projects>\r\n\t<buildSpec>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.ResourceManagerBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.PreCompilerBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>org.eclipse.jdt.core.javabuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t\t<buildCommand>\r\n\t\t\t<name>com.android.ide.eclipse.adt.ApkBuilder</name>\r\n\t\t\t<arguments>\r\n\t\t\t</arguments>\r\n\t\t</buildCommand>\r\n\t</buildSpec>\r\n\t<natures>\r\n\t\t<nature>com.android.ide.eclipse.adt.AndroidNature</nature>\r\n\t\t<nature>org.eclipse.jdt.core.javanature</nature>\r\n\t</natures>\r\n</projectDescription>\r\n"
  },
  {
    "path": "RtspViewer/AndroidManifest.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\r\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\r\n    package=\"de.kp.rtspviewer\"\r\n    android:versionCode=\"1\"\r\n    android:versionName=\"1.0\" >\r\n\r\n    <uses-sdk android:minSdkVersion=\"10\" />\r\n    <uses-permission android:name=\"android.permission.INTERNET\" />\r\n    \r\n    <application\r\n        android:icon=\"@drawable/icon\"\r\n        android:label=\"@string/app_name\" android:debuggable=\"true\">\r\n        <activity\r\n            android:name=\".RtspViewerActivity\"\r\n            android:label=\"@string/app_name\" >\r\n            <intent-filter>\r\n                <action android:name=\"android.intent.action.MAIN\" />\r\n\r\n                <category android:name=\"android.intent.category.LAUNCHER\" />\r\n            </intent-filter>\r\n        </activity>\r\n    </application>\r\n\r\n</manifest>"
  },
  {
    "path": "RtspViewer/gpl.txt",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<http://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<http://www.gnu.org/philosophy/why-not-lgpl.html>.\n"
  },
  {
    "path": "RtspViewer/proguard-project.txt",
    "content": "# To enable ProGuard in your project, edit project.properties\n# to define the proguard.config property as described in that file.\n#\n# Add project specific ProGuard rules here.\n# By default, the flags in this file are appended to flags specified\n# in ${sdk.dir}/tools/proguard/proguard-android.txt\n# You can edit the include path and order by changing the ProGuard\n# include property in project.properties.\n#\n# For more details, see\n#   http://developer.android.com/guide/developing/tools/proguard.html\n\n# Add any project specific keep options here:\n\n# If your project uses WebView with JS, uncomment the following\n# and specify the fully qualified class name to the JavaScript interface\n# class:\n#-keepclassmembers class fqcn.of.javascript.interface.for.webview {\n#   public *;\n#}\n"
  },
  {
    "path": "RtspViewer/project.properties",
    "content": "# This file is automatically generated by Android Tools.\n# Do not modify this file -- YOUR CHANGES WILL BE ERASED!\n#\n# This file must be checked in Version Control Systems.\n#\n# To customize properties used by the Ant build system edit\n# \"ant.properties\", and override values to adapt the script to your\n# project structure.\n#\n# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):\n#proguard.config=${sdk.dir}\\tools\\proguard\\proguard-android.txt:proguard-project.txt\n\n# Project target.\ntarget=android-10\nandroid.library.reference.1=../RtspCamera\n"
  },
  {
    "path": "RtspViewer/res/layout/videoview.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\r\n<LinearLayout xmlns:android=\"http://schemas.android.com/apk/res/android\"\r\n    android:layout_width=\"fill_parent\"\r\n    android:layout_height=\"fill_parent\"\r\n    android:orientation=\"vertical\" >\r\n\r\n    <com.orangelabs.rcs.service.api.client.media.video.VideoSurfaceView\r\n        android:id=\"@+id/incoming_video_view\"\r\n        android:layout_width=\"fill_parent\"\r\n        android:layout_height=\"fill_parent\"\r\n        />\r\n\r\n</LinearLayout>"
  },
  {
    "path": "RtspViewer/res/values/strings.xml",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\r\n<resources>\r\n\r\n    <string name=\"hello\">Hello World, RtspViewerActivity!</string>\r\n    <string name=\"app_name\">RtspViewer</string>\r\n\r\n</resources>"
  },
  {
    "path": "RtspViewer/src/de/kp/rtspviewer/RtspViewerActivity.java",
    "content": "package de.kp.rtspviewer;\n\n/**\n * This is the most minimal viewer for RtspCamera app\n * \n * @author Peter Arwanitis (arwanitis@dr-kruscheundpartner.de)\n *\n */\nimport android.app.Activity;\nimport android.os.Bundle;\nimport android.util.Log;\n\nimport com.orangelabs.rcs.platform.AndroidFactory;\nimport com.orangelabs.rcs.provider.settings.RcsSettings;\nimport com.orangelabs.rcs.service.api.client.media.video.VideoSurfaceView;\n\nimport de.kp.net.rtp.viewer.RtpVideoRenderer;\n\npublic class RtspViewerActivity extends Activity {\n\n\t/**\n\t * Video renderer\n\t */\n\tprivate RtpVideoRenderer incomingRenderer = null;\n\n\t/**\n\t * Video preview\n\t */\n\tprivate VideoSurfaceView incomingVideoView = null;\n\n\t/**\n\t * hardcoded rtsp server path\n\t */\n\tprivate String rtspConnect = \"rtsp://192.168.178.47:8080/video\";\n\t// private String rtsp =\n\t// \"rtsp://184.72.239.149/vod/mp4:BigBuckBunny_175k.mov\";\n\n\tprivate int videoHeight;\n\n\tprivate int videoWidth;\n\n\tprivate String TAG = \"RtspViewer\";\n\n\t@Override\n\tpublic void onCreate(Bundle icicle) {\n\n\t\tLog.i(TAG, \"onCreate\");\n\n\t\tsuper.onCreate(icicle);\n\n\t\t// Set application context ... 
skipping FileFactory\n\t\tAndroidFactory.setApplicationContext(getApplicationContext());\n\n\t\t// Instantiate the settings manager\n\t\tRcsSettings.createInstance(getApplicationContext());\n\n\t\tsetContentView(R.layout.videoview);\n\n\t\t// <string\n\t\t// name=\"rcs_settings_label_default_video_format\">h263-2000</string>\n\t\t// <string-array name=\"rcs_settings_list_video_format_value\">\n\t\t// <item>h263-2000</item>\n\t\t// <item>h264</item>\n\t\t// </string-array>\n\t\t// <string-array name=\"rcs_settings_list_video_format_label\">\n\t\t// <item>Low (H.263)</item>\n\t\t// <item>High (H.264)</item>\n\t\t// </string-array>\n\t\t//\n\t\t// <string name=\"rcs_settings_label_default_video_size\">QCIF</string>\n\t\t// <string-array name=\"rcs_settings_list_video_size_value\">\n\t\t// <item>QCIF</item>\n\t\t// <!-- <item>QVGA</item> -->\n\t\t// </string-array>\n\t\t// <string-array name=\"rcs_settings_list_video_size_label\">\n\t\t// <item>Low (176x144)</item>\n\t\t// <!-- <item>High (320x240)</item> -->\n\t\t// </string-array>\n\n\t\t// Set incoming video preview\n\t\tif (incomingVideoView == null) {\n\t\t\tincomingVideoView = (VideoSurfaceView) findViewById(R.id.incoming_video_view);\n\t\t\tincomingVideoView.setAspectRatio(videoWidth, videoHeight);\n\n\t\t\ttry {\n\t\t\t\tincomingRenderer = new RtpVideoRenderer(rtspConnect);\n\n\t\t\t} catch (Exception e) {\n\t\t\t\t// TODO Auto-generated catch block\n\t\t\t\te.printStackTrace();\n\t\t\t}\n\t\t\tincomingRenderer.setVideoSurface(incomingVideoView);\n\t\t}\n\n\t}\n\n\t@Override\n\tprotected void onPause() {\n\t\tLog.i(TAG, \"onPause\");\n\t\tsuper.onPause();\n\t}\n\t\n\t@Override\n\tprotected void onResume() {\n\t\tLog.i(TAG, \"onResume\");\n\t\tsuper.onResume();\n\n\t\tincomingRenderer.open();\n\t\tincomingRenderer.start();\n\n\t\tLog.i(TAG, \"onResume renderer started\");\n\n\t}\n\n\t@Override\n\tpublic void onDestroy() {\n\t\tLog.i(TAG, 
\"onDestroy\");\n\n\t\tsuper.onDestroy();\n\n\t\tincomingRenderer.stop();\n\t\tincomingRenderer.close();\n\n\t}\n\n}\n"
  }
]